Merge branch 'drm-intel-next' of git://git.freedesktop.org/git/drm-intel into drm-next
- fine-grained display power domains for byt (Imre)
- runtime pm prep patches for !hsw from Paulo
- WiZ hashing flag updates from Ville
- ppgtt setup cleanup and enabling of full 4G range on bdw (Ben)
- fixes from Jesse for the inherited initial config code
- gpu reset code improvements from Mika
- per-pipe num_planes refactoring from Damien
- stability fixes around bdw forcewake handling and other bdw w/a from Mika, Ken
- and as usual a pile of smaller fixes all over

* 'drm-intel-next' of git://git.freedesktop.org/git/drm-intel: (107 commits)
  drm/i915: Go OCD on the Makefile
  drm/i915: Implement command buffer parsing logic
  drm/i915: Refactor shmem pread setup
  drm/i915: Avoid div by zero when pixel clock is large
  drm/i915: power domains: add vlv power wells
  drm/i915: factor out intel_set_cpu_fifo_underrun_reporting_nolock
  drm/i915: vlv: factor out valleyview_display_irq_install
  drm/i915: sanity check power well sw state against hw state
  drm/i915: factor out reset_vblank_counter
  drm/i915: sanitize PUNIT register macro definitions
  drm/i915: vlv: keep first level vblank IRQs masked
  drm/i915: check pipe power domain when reading its hw state
  drm/i915: check port power domain when reading the encoder hw state
  drm/i915: get port power domain in connector detect handlers
  drm/i915: add port power domains
  drm/i915: add noop power well handlers instead of NULL checking them
  drm/i915: split power well 'set' handler to separate enable/disable/sync_hw
  drm/i915: add init power domain to always-on power wells
  drm/i915: move power domain macros to intel_pm.c
  drm/i915: Disable full ppgtt by default
  ...
commit 8ad2bc9796
drivers/gpu/drm/i915/Makefile
@@ -3,58 +3,69 @@

# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
	  i915_gpu_error.o \

# Please keep these build lists sorted!

# core driver code
i915-y := i915_drv.o \
	  i915_params.o \
	  i915_suspend.o \
	  i915_gem.o \
	  i915_sysfs.o \
	  intel_pm.o
i915-$(CONFIG_COMPAT)   += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o

# GEM code
i915-y += i915_cmd_parser.o \
	  i915_gem_context.o \
	  i915_gem_debug.o \
	  i915_gem_dmabuf.o \
	  i915_gem_evict.o \
	  i915_gem_execbuffer.o \
	  i915_gem_gtt.o \
	  i915_gem.o \
	  i915_gem_stolen.o \
	  i915_gem_tiling.o \
	  i915_params.o \
	  i915_sysfs.o \
	  i915_gpu_error.o \
	  i915_irq.o \
	  i915_trace_points.o \
	  i915_ums.o \
	  intel_ringbuffer.o \
	  intel_uncore.o

# modesetting core code
i915-y += intel_bios.o \
	  intel_display.o \
	  intel_modes.o \
	  intel_overlay.o \
	  intel_sideband.o \
	  intel_sprite.o
i915-$(CONFIG_ACPI)           += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o

# modesetting output/encoder code
i915-y += dvo_ch7017.o \
	  dvo_ch7xxx.o \
	  dvo_ivch.o \
	  dvo_ns2501.o \
	  dvo_sil164.o \
	  dvo_tfp410.o \
	  intel_crt.o \
	  intel_lvds.o \
	  intel_dsi.o \
	  intel_dsi_cmd.o \
	  intel_dsi_pll.o \
	  intel_bios.o \
	  intel_ddi.o \
	  intel_dp.o \
	  intel_hdmi.o \
	  intel_sdvo.o \
	  intel_modes.o \
	  intel_panel.o \
	  intel_pm.o \
	  intel_i2c.o \
	  intel_tv.o \
	  intel_dsi_cmd.o \
	  intel_dsi.o \
	  intel_dsi_pll.o \
	  intel_dvo.o \
	  intel_ringbuffer.o \
	  intel_overlay.o \
	  intel_sprite.o \
	  intel_sideband.o \
	  intel_uncore.o \
	  dvo_ch7xxx.o \
	  dvo_ch7017.o \
	  dvo_ivch.o \
	  dvo_tfp410.o \
	  dvo_sil164.o \
	  dvo_ns2501.o \
	  i915_gem_dmabuf.o
	  intel_hdmi.o \
	  intel_i2c.o \
	  intel_lvds.o \
	  intel_panel.o \
	  intel_sdvo.o \
	  intel_tv.o

i915-$(CONFIG_COMPAT)   += i915_ioc32.o

i915-$(CONFIG_ACPI)     += intel_acpi.o intel_opregion.o

i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o

i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o

# legacy horrors
i915-y += i915_dma.o \
	  i915_ums.o

obj-$(CONFIG_DRM_I915) += i915.o
drivers/gpu/drm/i915/i915_cmd_parser.c (new file)
@@ -0,0 +1,485 @@
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"

/**
 * DOC: i915 batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for both
 * normal and drm master processes).
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses in
 * scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-ring command tables.
 *
 * Other command table entries map fairly directly to high level categories
 * mentioned above: rejected, master-only, register whitelist. The parser
 * implements a number of checks, including the privileged memory checks, via a
 * general bitmasking mechanism.
 */
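To make the table mechanism described above concrete, here is a sketch of what
a per-ring entry could look like. The struct and the CMD_DESC_* flags are the
ones this series adds in i915_drv.h, but the opcode, masks, and offsets below
are invented for illustration and are not the actual table entries the driver
ships:

/*
 * Hypothetical descriptor: allow a register-writing MI command but check
 * the register address in dword 1 against the ring's whitelist. Values
 * are illustrative only.
 */
static const struct drm_i915_cmd_descriptor sample_mi_cmds[] = {
	{
		.flags = CMD_DESC_REGISTER,
		.cmd = { .value = 0x22 << 23, .mask = 0x3F << 23 },
		.length = { .mask = 0x3F },	/* standard MI length field */
		.reg = { .offset = 1, .mask = 0x007FFFFC },
	},
};

static const struct drm_i915_cmd_table sample_cmd_tables[] = {
	{ sample_mi_cmds, ARRAY_SIZE(sample_mi_cmds) },
};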

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static void validate_cmds_sorted(struct intel_ring_buffer *ring)
{
	int i;

	if (!ring->cmd_tables || ring->cmd_table_count == 0)
		return;

	for (i = 0; i < ring->cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
		u32 previous = 0;
		int j;

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			u32 curr = desc->cmd.value & desc->cmd.mask;

			if (curr < previous)
				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					  ring->id, i, j, curr, previous);

			previous = curr;
		}
	}
}

static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
	int i;
	u32 previous = 0;

	for (i = 0; i < reg_count; i++) {
		u32 curr = reg_table[i];

		if (curr < previous)
			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
				  ring_id, i, curr, previous);

		previous = curr;
	}
}

static void validate_regs_sorted(struct intel_ring_buffer *ring)
{
	check_sorted(ring->id, ring->reg_table, ring->reg_count);
	check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
}

/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_ring_buffer based on whether the platform requires software
 * command parsing.
 */
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
{
	if (!IS_GEN7(ring->dev))
		return;

	switch (ring->id) {
	case RCS:
		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VCS:
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case BCS:
		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		break;
	case VECS:
		/* VECS can use the same length_mask function as VCS */
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
			  ring->id);
		BUG();
	}

	validate_cmds_sorted(ring);
	validate_regs_sorted(ring);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(const struct drm_i915_cmd_table *table,
		  u32 cmd_header)
{
	int i;

	for (i = 0; i < table->count; i++) {
		const struct drm_i915_cmd_descriptor *desc = &table->table[i];
		u32 masked_cmd = desc->cmd.mask & cmd_header;
		u32 masked_value = desc->cmd.value & desc->cmd.mask;

		if (masked_cmd == masked_value)
			return desc;
	}

	return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_ring_buffer *ring,
	 u32 cmd_header,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	u32 mask;
	int i;

	for (i = 0; i < ring->cmd_table_count; i++) {
		const struct drm_i915_cmd_descriptor *desc;

		desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
		if (desc)
			return desc;
	}

	mask = ring->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

	BUG_ON(!default_desc);
	default_desc->flags = CMD_DESC_SKIP;
	default_desc->length.mask = mask;

	return default_desc;
}

static bool valid_reg(const u32 *table, int count, u32 addr)
{
	if (table && count != 0) {
		int i;

		for (i = 0; i < count; i++) {
			if (table[i] == addr)
				return true;
		}
	}

	return false;
}

static u32 *vmap_batch(struct drm_i915_gem_object *obj)
{
	int i;
	void *addr = NULL;
	struct sg_page_iter sg_iter;
	struct page **pages;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL) {
		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
		goto finish;
	}

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		pages[i] = sg_page_iter_page(&sg_iter);
		i++;
	}

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	if (addr == NULL) {
		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
		goto finish;
	}

finish:
	if (pages)
		drm_free_large(pages);
	return (u32*)addr;
}

/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
{
	/* No command tables indicates a platform without parsing */
	if (!ring->cmd_tables)
		return false;

	return (i915.enable_cmd_parser == 1);
}

#define LENGTH_BIAS 2
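The bias of 2 matches the usual gen command encoding, where a command's length
field holds the total dword count minus two. A rough worked example, with the
header value made up and a ring pointer assumed as in i915_parse_cmds below:

	u32 header = 0x03;	/* MI-client command, length field = 3 */
	u32 mask = ring->get_cmd_length_mask(header);	/* 0x3F for MI */
	u32 len = (header & mask) + LENGTH_BIAS;	/* 3 + 2 = 5 dwords */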

/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails
 */
int i915_parse_cmds(struct intel_ring_buffer *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master)
{
	int ret = 0;
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
	int needs_clflush = 0;

	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
		return ret;
	}

	batch_base = vmap_batch(batch_obj);
	if (!batch_base) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
		i915_gem_object_unpin_pages(batch_obj);
		return -ENOMEM;
	}

	if (needs_clflush)
		drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);

	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
	batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));

	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
		u32 length;

		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(ring, *cmd, &default_desc);
		if (!desc) {
			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

		if ((batch_end - cmd) < length) {
			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%ld\n",
					 *cmd,
					 length,
					 batch_end - cmd);
			ret = -EINVAL;
			break;
		}

		if (desc->flags & CMD_DESC_REJECT) {
			DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
			ret = -EINVAL;
			break;
		}

		if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
			DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		if (desc->flags & CMD_DESC_REGISTER) {
			u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;

			if (!valid_reg(ring->reg_table,
				       ring->reg_count, reg_addr)) {
				if (!is_master ||
				    !valid_reg(ring->master_reg_table,
					       ring->master_reg_count,
					       reg_addr)) {
					DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
							 reg_addr,
							 *cmd,
							 ring->id);
					ret = -EINVAL;
					break;
				}
			}
		}

		if (desc->flags & CMD_DESC_BITMASK) {
			int i;

			for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
				u32 dword;

				if (desc->bits[i].mask == 0)
					break;

				dword = cmd[desc->bits[i].offset] &
					desc->bits[i].mask;

				if (dword != desc->bits[i].expected) {
					DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
							 *cmd,
							 desc->bits[i].mask,
							 desc->bits[i].expected,
							 dword, ring->id);
					ret = -EINVAL;
					break;
				}
			}

			if (ret)
				break;
		}

		cmd += length;
	}

	if (cmd >= batch_end) {
		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
		ret = -EINVAL;
	}

	vunmap(batch_base);

	i915_gem_object_unpin_pages(batch_obj);

	return ret;
}
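A call site in the execbuffer path would then look roughly like the sketch
below; the surrounding execbuffer plumbing (args, file, error path) is assumed
and is not part of this hunk:

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring, batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret)
			return ret;
		/* a clean batch may now be submitted with the secure bit set */
	}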

drivers/gpu/drm/i915/i915_debugfs.c
@@ -602,7 +602,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8) {
		int i;
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

@@ -615,16 +614,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
			   i, I915_READ(GEN8_GT_IER(i)));
	}

	for_each_pipe(i) {
	for_each_pipe(pipe) {
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(i),
			   I915_READ(GEN8_DE_PIPE_IMR(i)));
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(i),
			   I915_READ(GEN8_DE_PIPE_IIR(i)));
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(i),
			   I915_READ(GEN8_DE_PIPE_IER(i)));
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",

@@ -1348,6 +1347,8 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {

@@ -1391,6 +1392,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

@@ -1405,11 +1409,15 @@ static int i915_ips_status(struct seq_file *m, void *unused)
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}

@@ -1420,6 +1428,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))

@@ -1429,6 +1439,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

@@ -1468,7 +1480,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {

@@ -1476,12 +1488,13 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

@@ -1498,10 +1511,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_gfxec(struct seq_file *m, void *unused)

@@ -1757,7 +1771,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {

@@ -1972,12 +1986,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;

@@ -1997,7 +2015,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));

@@ -2030,6 +2048,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
		return "PORT_DDI_A_2_LANES";
	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
		return "PORT_DDI_A_4_LANES";
	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
		return "PORT_DDI_B_2_LANES";
	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
		return "PORT_DDI_B_4_LANES";
	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
		return "PORT_DDI_C_2_LANES";
	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
		return "PORT_DDI_C_4_LANES";
	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
		return "PORT_DDI_D_2_LANES";
	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
		return "PORT_DDI_D_4_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:

@@ -2180,6 +2220,7 @@ static void intel_connector_info(struct seq_file *m,
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, drm_get_connector_name(connector),

@@ -2202,6 +2243,9 @@ static void intel_connector_info(struct seq_file *m,
	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
		intel_lvds_info(m, intel_connector);

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static int i915_display_info(struct seq_file *m, void *unused)

@@ -3167,9 +3211,8 @@ i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);
	return 0;
}
drivers/gpu/drm/i915/i915_dma.c
@@ -1321,12 +1321,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_power_domains_init_hw(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

@@ -1343,7 +1343,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0) {
		intel_display_power_put(dev, POWER_DOMAIN_VGA);
		intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
		return 0;
	}

@@ -1381,7 +1381,7 @@ cleanup_gem:
	WARN_ON(dev_priv->mm.aliasing_ppgtt);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
	intel_display_power_put(dev, POWER_DOMAIN_VGA);
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);

@@ -1480,12 +1480,16 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	info->num_sprites = 1;
	if (IS_VALLEYVIEW(dev))
		info->num_sprites = 2;
		for_each_pipe(pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");

@@ -1702,7 +1706,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		goto out_gem_unload;
	}

	intel_power_domains_init(dev);
	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);

@@ -1731,7 +1735,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	return 0;

out_power_well:
	intel_power_domains_remove(dev);
	intel_power_domains_remove(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.scan_objects)

@@ -1781,8 +1785,8 @@ int i915_driver_unload(struct drm_device *dev)
	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev, true);
	intel_power_domains_remove(dev);
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_remove(dev_priv);

	i915_teardown_sysfs(dev);
drivers/gpu/drm/i915/i915_drv.c
@@ -265,6 +265,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

@@ -274,6 +275,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

@@ -401,15 +403,13 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev)) {
		WARN_ON(!i915.preliminary_hw_support);
		return false;
	}

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)

@@ -434,7 +434,7 @@ static int i915_drm_freeze(struct drm_device *dev)
	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	hsw_disable_package_c8(dev_priv);
	intel_display_set_init_power(dev, true);
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

@@ -477,6 +477,8 @@ static int i915_drm_freeze(struct drm_device *dev)
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	dev_priv->suspend_count++;

	return 0;
}

@@ -556,7 +558,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);
	intel_power_domains_init_hw(dev_priv);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

@@ -847,6 +849,7 @@ static int i915_runtime_suspend(struct device *device)
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");
drivers/gpu/drm/i915/i915_drv.h
@@ -79,7 +79,7 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites + (s) + 'A')
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,

@@ -114,6 +114,17 @@ enum intel_display_power_domain {
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_INIT,

@@ -121,8 +132,6 @@ enum intel_display_power_domain {
	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)

@@ -130,14 +139,6 @@ enum intel_display_power_domain {
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */

@@ -159,6 +160,7 @@ enum hpd_pin {
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
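With num_sprites now stored per pipe, callers walk all planes like this usage
sketch (assuming, as the macros themselves do, a dev pointer in scope):

	enum pipe pipe;
	int sprite;

	for_each_pipe(pipe)
		for_each_sprite(pipe, sprite)
			DRM_DEBUG_KMS("pipe %c sprite %c\n",
				      pipe_name(pipe),
				      sprite_name(pipe, sprite));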

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \

@@ -303,6 +305,10 @@ struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;

@@ -360,7 +366,7 @@ struct drm_i915_error_state {
		int page_count;
		u32 gtt_offset;
		u32 *pages[0];
	} *ringbuffer, *batchbuffer, *ctx, *hws_page;
	} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

	struct drm_i915_error_request {
		long jiffies;

@@ -375,6 +381,9 @@ struct drm_i915_error_state {
			u32 pp_dir_base;
		};
	} vm_info;

	pid_t pid;
	char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
	u32 size;

@@ -499,7 +508,7 @@ struct intel_uncore {
	unsigned fw_rendercount;
	unsigned fw_mediacount;

	struct delayed_work force_wake_work;
	struct timer_list force_wake_timer;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \

@@ -534,7 +543,7 @@ struct intel_uncore {
struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 num_sprites:2;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);

@@ -652,12 +661,12 @@ struct i915_address_space {
			       enum i915_cache_level level,
			       bool valid); /* Create a valid PTE */
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       uint64_t start,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

@@ -691,21 +700,21 @@ struct i915_gtt {
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

#define GEN8_LEGACY_PDPS 4
struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned num_pd_entries;
	unsigned num_pd_pages; /* gen8+ */
	union {
		struct page **pt_pages;
		struct page *gen8_pt_pages;
		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
	};
	struct page *pd_pages;
	int num_pd_pages;
	int num_pt_pages;
	union {
		uint32_t pd_offset;
		dma_addr_t pd_dma_addr[4];
		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
	};
	union {
		dma_addr_t *pt_dma_addr;

@@ -1016,6 +1025,36 @@ struct intel_ilk_power_mgmt {
	struct drm_i915_gem_object *renderctx;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
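For the always-on wells this split means the hooks can be plain noops instead
of NULL pointers that every caller must check (per the "add noop power well
handlers" patch in this series). A minimal sketch of such an ops table, with
names assumed for illustration rather than taken from this hunk:

static void always_on_power_well_noop(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
}

static bool always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	return true;	/* an always-on well is never powered down */
}

static const struct i915_power_well_ops always_on_power_well_ops = {
	.sync_hw = always_on_power_well_noop,
	.enable = always_on_power_well_noop,
	.disable = always_on_power_well_noop,
	.is_enabled = always_on_power_well_enabled,
};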

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;

@@ -1023,11 +1062,8 @@ struct i915_power_well {
	/* power well enable/disable usage count */
	int count;
	unsigned long domains;
	void *data;
	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
		    bool enable);
	bool (*is_enabled)(struct drm_device *dev,
			   struct i915_power_well *power_well);
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {

@@ -1124,6 +1160,14 @@ struct i915_gem_mm {
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */

@@ -1313,11 +1357,10 @@ struct ilk_wm_values {
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 * everything, so we have the requirements_met variable. When we switch
 * requirements_met to true we decrease disable_count, and increase it in the
 * opposite case. The requirements_met variable is true when all the CRTCs,
 * encoders and the power well are disabled.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the

@@ -1340,7 +1383,6 @@ struct ilk_wm_values {
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;

@@ -1427,6 +1469,8 @@ typedef struct drm_i915_private {
	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

@@ -1594,6 +1638,8 @@ typedef struct drm_i915_private {
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;

	u32 suspend_count;
} drm_i915_private_t;

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)

@@ -1745,7 +1791,6 @@ struct drm_i915_gem_object {
	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

@@ -1791,6 +1836,7 @@ struct drm_i915_gem_request {

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;

@@ -1803,6 +1849,90 @@ struct drm_i915_file_private {
	atomic_t rps_wait_boost;
};

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)
#define CMD_DESC_MASTER   (1<<5)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 */
	struct {
		u32 offset;
		u32 mask;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
	} bits[MAX_CMD_DESC_BITMASKS];
};

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each ring has an array of tables. Each table consists of an array of command
 * descriptors, which must be sorted with command opcodes in ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};

#define INTEL_INFO(dev) (&to_i915(dev)->info)

#define IS_I830(dev) ((dev)->pdev->device == 0x3577)

@@ -1965,6 +2095,7 @@ struct i915_params {
	int enable_pc8;
	int pc8_timeout;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;

@@ -2004,7 +2135,9 @@ extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);

void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
		      int new_delay);

@@ -2025,6 +2158,9 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

@@ -2097,6 +2233,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{

@@ -2163,8 +2302,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
	}
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)

@@ -2365,63 +2506,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
		intel_gtt_chipset_flush();
}
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
		return false;

	if (i915.enable_ppgtt == 1 && full)
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return false;
	}
#endif

	if (full)
		return HAS_PPGTT(dev);
	else
		return HAS_ALIASING_PPGTT(dev);
}

static inline void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}
bool intel_enable_ppgtt(struct drm_device *dev, bool full);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);

@@ -2478,7 +2563,8 @@ static inline void i915_error_state_buf_release(
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_capture_error_state(struct drm_device *dev, bool wedge,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);

@@ -2487,6 +2573,14 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_cmd_parser.c */
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
int i915_parse_cmds(struct intel_ring_buffer *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

@@ -2565,6 +2659,7 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);

@@ -2599,6 +2694,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@ -61,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
|
|||
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
|
||||
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
|
||||
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
|
||||
static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
|
||||
|
||||
static bool cpu_cache_is_coherent(struct drm_device *dev,
|
||||
enum i915_cache_level level)
|
||||
|
@ -326,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pins the specified object's pages and synchronizes the object with
|
||||
* GPU accesses. Sets needs_clflush to non-zero if the caller should
|
||||
* flush the object from the CPU cache.
|
||||
*/
|
||||
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
|
||||
int *needs_clflush)
|
||||
{
|
||||
int ret;
|
||||
|
||||
*needs_clflush = 0;
|
||||
|
||||
if (!obj->base.filp)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
|
||||
/* If we're not in the cpu read domain, set ourself into the gtt
|
||||
* read domain and manually flush cachelines (if required). This
|
||||
* optimizes for the case when the gpu will dirty the data
|
||||
* anyway again before the next pread happens. */
|
||||
*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
|
||||
obj->cache_level);
|
||||
ret = i915_gem_object_wait_rendering(obj, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_get_pages(obj);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_object_pin_pages(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */

@@ -423,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,

@@ -2148,7 +2172,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position, request_start;
	int was_empty;
	int ret;

	request_start = intel_ring_get_tail(ring);

@@ -2199,7 +2222,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	i915_gem_context_reference(request->ctx);

	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

@@ -2220,13 +2242,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);

		if (was_empty) {
			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
			intel_mark_busy(dev_priv->dev);
		}
		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		intel_mark_busy(dev_priv->dev);
	}

	if (out_seqno)

@@ -2259,14 +2279,13 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
		return true;

	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
		if (dev_priv->gpu_error.stop_rings == 0 &&
		    i915_gem_context_is_default(ctx)) {
			DRM_ERROR("gpu hanging too fast, banning!\n");
		} else {
		if (!i915_gem_context_is_default(ctx)) {
			DRM_DEBUG("context hanging too fast, banning!\n");
			return true;
		} else if (dev_priv->gpu_error.stop_rings == 0) {
			DRM_ERROR("gpu hanging too fast, banning!\n");
			return true;
		}

		return true;
	}

	return false;

@@ -2303,11 +2322,13 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
	kfree(request);
}

static struct drm_i915_gem_request *
i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	const u32 completed_seqno = ring->get_seqno(ring, false);
	u32 completed_seqno;

	completed_seqno = ring->get_seqno(ring, false);

	list_for_each_entry(request, &ring->request_list, list) {
		if (i915_seqno_passed(completed_seqno, request->seqno))
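The walk above stops at the first request whose seqno has not yet been passed. For orientation, the comparison it relies on is wrap-safe signed subtraction; a standalone sketch (assumption: this mirrors i915_seqno_passed()):

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t a, uint32_t b)
{
	/* "a has passed b" stays correct across 32-bit wraparound */
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));            /* 1: 10 passed 5 */
	printf("%d\n", seqno_passed(5, 10));            /* 0: not yet */
	printf("%d\n", seqno_passed(3, 0xfffffff0u));   /* 1: despite wrap */
	return 0;
}
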
@@ -2325,7 +2346,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_first_non_complete(ring);
	request = i915_gem_find_active_request(ring);

	if (request == NULL)
		return;

@@ -2417,7 +2438,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;

@@ -2744,7 +2765,7 @@ int i915_vma_unbind(struct i915_vma *vma)

	i915_gem_gtt_finish_object(obj);

	list_del(&vma->mm_list);
	list_del_init(&vma->mm_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	if (i915_is_ggtt(vma->vm))
		obj->map_and_fenceable = true;

@@ -4860,6 +4881,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

@@ -99,6 +99,50 @@
static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to);

static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}

static void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	do_ppgtt_cleanup(ppgtt);
	kfree(ppgtt);
}

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))

@@ -714,7 +758,7 @@ unpin_out:
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file_priv: file_priv associated with the context, may be NULL
 * @id: context id number
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,

@@ -746,9 +790,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (!HAS_HW_CONTEXTS(dev))
		return -ENODEV;

@@ -775,9 +816,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (args->ctx_id == DEFAULT_CONTEXT_ID)
		return -ENOENT;

@@ -1182,6 +1182,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring,
				      batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret)
			goto err;

		/*
		 * XXX: Actually do this when enabling batch copy...
		 *
		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
		 * from MI_BATCH_BUFFER_START commands issued in the
		 * dispatch_execbuffer implementations. We specifically don't
		 * want that set when the command parser is enabled.
		 */
	}

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */

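For orientation, a compressed standalone sketch of the gating above: the parser only runs on rings that need it, and a parse failure aborts the submission before anything is dispatched. All names here (needs_parser, parse_cmds, submit) are hypothetical stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

static bool needs_parser(int ring) { return ring == 0; }	/* e.g. render ring only */
static int parse_cmds(const unsigned *batch, unsigned len) { (void)batch; return len ? 0 : -22; }

static int submit(int ring, const unsigned *batch, unsigned len)
{
	if (needs_parser(ring)) {
		int ret = parse_cmds(batch, len);
		if (ret)
			return ret;	/* reject the batch before it ever runs */
	}
	/* ... dispatch the (now vetted) batch ... */
	return 0;
}

int main(void)
{
	unsigned batch[1] = { 0 };
	printf("%d\n", submit(0, batch, 1));	/* 0: parsed and submitted */
	return 0;
}
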
@@ -1,5 +1,6 @@
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),

@@ -29,6 +30,29 @@
#include "i915_trace.h"
#include "intel_drv.h"

bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
		return false;

	if (i915.enable_ppgtt == 1 && full)
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return false;
	}
#endif

	/* Full ppgtt disabled by default for now due to issues. */
	if (full)
		return false; /* HAS_PPGTT(dev) */
	else
		return HAS_ALIASING_PPGTT(dev);
}

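Read as a truth table, i915.enable_ppgtt selects off (0), aliasing-only (1), or would-allow-full (2), with full PPGTT still hard-disabled. A standalone restatement of the same checks (enable_ppgtt/has_aliasing are stand-in names):

#include <stdbool.h>
#include <stdio.h>

static bool enable_ppgtt(int param, bool full, bool has_aliasing)
{
	if (param == 0 || !has_aliasing)
		return false;
	if (param == 1 && full)
		return false;
	if (full)
		return false;	/* disabled by default for now due to issues */
	return has_aliasing;
}

int main(void)
{
	printf("aliasing: %d, full: %d\n",
	       enable_ppgtt(1, false, true),	/* 1 */
	       enable_ppgtt(2, true, true));	/* 0: full still off */
	return 0;
}
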
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;

@@ -64,7 +88,19 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
#define GEN8_LEGACY_PDPS 4

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 */
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff

#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
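A standalone sketch of that split, using the same shifts and masks to decode one GPU address into its pdpe/pde/pte indices (the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK  0x3
#define GEN8_PDE_SHIFT  21
#define GEN8_PDE_MASK   0x1ff
#define GEN8_PTE_SHIFT  12
#define GEN8_PTE_MASK   0x1ff

int main(void)
{
	uint64_t addr = 0x40123456;	/* arbitrary example address */

	/* Same decomposition the clear/insert paths below perform on 'start'. */
	unsigned pdpe = addr >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde  = addr >> GEN8_PDE_SHIFT  & GEN8_PDE_MASK;
	unsigned pte  = addr >> GEN8_PTE_SHIFT  & GEN8_PTE_MASK;
	unsigned off  = addr & 0xfff;

	printf("pdpe=%u pde=%u pte=%u offset=0x%x\n", pdpe, pde, pte, off);
	return 0;
}
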

@@ -254,84 +290,113 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
	unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];

		last_pte = first_pte + num_entries;
		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES_PER_PAGE)
			last_pte = GEN8_PTES_PER_PAGE;

		pt_vaddr = kmap_atomic(page_table);

		for (i = first_pte; i < last_pte; i++)
		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
		pte = 0;
		if (++pde == GEN8_PDES_PER_PAGE) {
			pdpe++;
			pde = 0;
		}
	}
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      uint64_t start,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);

		pt_vaddr[act_pte] =
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
			break;

		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
					cache_level, true);
		if (++act_pte == GEN8_PTES_PER_PAGE) {
		if (++pte == GEN8_PTES_PER_PAGE) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
			if (++pde == GEN8_PDES_PER_PAGE) {
				pdpe++;
				pde = 0;
			}
			pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}
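Both loops above advance pte with a carry into pde, and from pde into pdpe. A standalone sketch of that carry chain, with tiny table geometry so the wrap is visible (the real code uses 512 entries per level):

#include <stdio.h>

#define PTES 4
#define PDES 4

int main(void)
{
	unsigned pdpe = 0, pde = 3, pte = 2;	/* arbitrary starting point */
	int n;

	for (n = 0; n < 5; n++) {
		printf("entry %d -> pdpe=%u pde=%u pte=%u\n", n, pdpe, pde, pte);
		if (++pte == PTES) {		/* same carry chain as above */
			pte = 0;
			if (++pde == PDES) {
				pde = 0;
				pdpe++;
			}
		}
	}
	return 0;
}
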
static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
static void gen8_free_page_tables(struct page **pt_pages)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages ; i++)
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	if (pt_pages == NULL)
		return;

	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
		if (pt_pages[i])
			__free_pages(pt_pages[i], 0);
}

static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	}

	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}

static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {

@@ -340,18 +405,14 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
		if (!ppgtt->pd_dma_addr[i])
			continue;

		pci_unmap_page(ppgtt->base.dev->pdev,
			       ppgtt->pd_dma_addr[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			if (addr)
				pci_unmap_page(ppgtt->base.dev->pdev,
					       addr,
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);

				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
	}
}
@@ -368,88 +429,198 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
	gen8_ppgtt_free(ppgtt);
}

/**
 * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
 * net effect resembling a 2-level page table in normal x86 terms. Each PDP
 * represents 1GB of memory
 * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
 *
 * TODO: Do something with the size parameter
 **/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
static struct page **__gen8_alloc_page_tables(void)
{
	struct page *pt_pages;
	int i, j, ret = -ENOMEM;
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	struct page **pt_pages;
	int i;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
	if (!pt_pages)
		return ERR_PTR(-ENOMEM);

	/* FIXME: split allocation into smaller pieces. For now we only ever do
	 * this once, but with full PPGTT, the multiple contiguous allocations
	 * will be bad.
	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
		pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!pt_pages[i])
			goto bail;
	}

	return pt_pages;

bail:
	gen8_free_page_tables(pt_pages);
	kfree(pt_pages);
	return ERR_PTR(-ENOMEM);
}

static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
					   const int max_pdp)
{
	struct page **pt_pages[GEN8_LEGACY_PDPS];
	int i, ret;

	for (i = 0; i < max_pdp; i++) {
		pt_pages[i] = __gen8_alloc_page_tables();
		if (IS_ERR(pt_pages[i])) {
			ret = PTR_ERR(pt_pages[i]);
			goto unwind_out;
		}
	}

	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
	 * "atomic" - for cleanup purposes.
	 */
	for (i = 0; i < max_pdp; i++)
		ppgtt->gen8_pt_pages[i] = pt_pages[i];

	return 0;

unwind_out:
	while (i--) {
		gen8_free_page_tables(pt_pages[i]);
		kfree(pt_pages[i]);
	}

	return ret;
}

static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
						     sizeof(dma_addr_t),
						     GFP_KERNEL);
		if (!ppgtt->gen8_pt_dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}

static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
						const int max_pdp)
{
	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
	if (!ppgtt->pd_pages)
		return -ENOMEM;

	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
	if (!pt_pages) {
		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
		return -ENOMEM;
	}

	ppgtt->gen8_pt_pages = pt_pages;
	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
	ppgtt->enable = gen8_ppgtt_enable;
	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;

	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);

	return 0;
}

static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	int ret;

	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
	if (ret) {
		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
		return ret;
	}

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		gen8_ppgtt_free(ppgtt);

	return ret;
}

static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
					     const int pd)
{
	dma_addr_t pd_addr;
	int ret;

	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
			       &ppgtt->pd_pages[pd], 0,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
	if (ret)
		return ret;

	ppgtt->pd_dma_addr[pd] = pd_addr;

	return 0;
}

static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
					const int pd,
					const int pt)
{
	dma_addr_t pt_addr;
	struct page *p;
	int ret;

	p = ppgtt->gen8_pt_pages[pd][pt];
	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
	if (ret)
		return ret;

	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;

	return 0;
}

/**
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 * FIXME: split allocation into smaller pieces. For now we only ever do this
 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 * TODO: Do something with the size parameter
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	int i, j, ret;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* 1. Do all our allocations for page directories and page tables. */
	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
	if (ret)
		return ret;

	/*
	 * - Create a mapping for the page directories.
	 * - For each page directory:
	 *	allocate space for page table mappings.
	 *	map each page table
	 * 2. Create DMA mappings for the page directories and page tables.
	 */
	for (i = 0; i < max_pdp; i++) {
		dma_addr_t temp;
		temp = pci_map_page(ppgtt->base.dev->pdev,
				    &ppgtt->pd_pages[i], 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
			goto err_out;

		ppgtt->pd_dma_addr[i] = temp;

		ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
		if (!ppgtt->gen8_pt_dma_addr[i])
			goto err_out;
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		if (ret)
			goto bail;

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
			temp = pci_map_page(ppgtt->base.dev->pdev,
					    p, 0, PAGE_SIZE,
					    PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
				goto err_out;

			ppgtt->gen8_pt_dma_addr[i][j] = temp;
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}
	}

	/* For now, the PPGTT helper functions all require that the PDEs are
	/*
	 * 3. Map all the page directory entries to point to the page tables
	 * we've allocated.
	 *
	 * For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again */
	 * will never need to touch the PDEs again.
	 */
	for (i = 0; i < max_pdp; i++) {
		gen8_ppgtt_pde_t *pd_vaddr;
		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);

@@ -461,20 +632,26 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
		kunmap_atomic(pd_vaddr);
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
				true);
	ppgtt->enable = gen8_ppgtt_enable;
	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 ppgtt->num_pt_pages,
			 (ppgtt->num_pt_pages - num_pt_pages) +
			 size % (1<<30));
			 ppgtt->num_pd_entries,
			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
	return 0;

err_out:
	ppgtt->base.cleanup(&ppgtt->base);
bail:
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
	return ret;
}
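The rewritten init path is now a three-step sequence (allocate, DMA-map, wire up PDEs) with a single unwind label. A compressed standalone sketch of that ordering, with stand-in names for the three phases:

#include <stdio.h>

static int alloc_tables(void)  { return 0; }	/* 1. allocations */
static int map_dma(void)       { return 0; }	/* 2. DMA mappings */
static void unmap_dma(void)    { puts("unmap dma"); }
static void free_tables(void)  { puts("free tables"); }

static int ppgtt_init(void)
{
	int ret;

	ret = alloc_tables();
	if (ret)
		return ret;

	ret = map_dma();
	if (ret)
		goto bail;

	/* 3. plug every PDE so the helpers can assume a valid tree */
	return 0;

bail:
	unmap_dma();
	free_tables();
	return ret;
}

int main(void)
{
	return ppgtt_init();
}
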

@@ -776,13 +953,15 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

@@ -809,12 +988,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      uint64_t start,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

@@ -838,38 +1018,49 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	list_del(&vm->global_link);
	drm_mm_takedown(&ppgtt->base.mm);
	drm_mm_remove_node(&ppgtt->node);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
}

static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&ppgtt->base.mm);
	drm_mm_remove_node(&ppgtt->node);

	gen6_ppgtt_unmap_pages(ppgtt);
	gen6_ppgtt_free(ppgtt);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int i, ret;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page

@@ -896,8 +1087,85 @@ alloc:
	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	return ret;
}

static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
				  GFP_KERNEL);

	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i]) {
			gen6_ppgtt_free(ppgtt);
			return -ENOMEM;
		}
	}

	return 0;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen6_ppgtt_allocate_page_directories(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
	if (ret) {
		drm_mm_remove_node(&ppgtt->node);
		return ret;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr) {
		drm_mm_remove_node(&ppgtt->node);
		gen6_ppgtt_free(ppgtt);
		return -ENOMEM;
	}

	return 0;
}

static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			gen6_ppgtt_unmap_pages(ppgtt);
			return -EIO;
		}

		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	return 0;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	if (IS_GEN6(dev)) {
		ppgtt->enable = gen6_ppgtt_enable;
		ppgtt->switch_mm = gen6_mm_switch;

@@ -909,72 +1177,35 @@ alloc:
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_setup_page_tables(ppgtt);
	if (ret) {
		gen6_ppgtt_free(ppgtt);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->base.start = 0;
	ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
				  GFP_KERNEL);
	if (!ppgtt->pt_pages) {
		drm_mm_remove_node(&ppgtt->node);
		return -ENOMEM;
	}

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;

		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);
	ppgtt->pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
	drm_mm_remove_node(&ppgtt->node);

	return ret;
}

int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)

@@ -1012,20 +1243,17 @@ ppgtt_bind_vma(struct i915_vma *vma,
	       enum i915_cache_level cache_level,
	       u32 flags)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;

	WARN_ON(flags);

	vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level);
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;

	vma->vm->clear_range(vma->vm,
			     entry,
			     vma->obj->base.size >> PAGE_SHIFT,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}
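The clear_range/insert_entries interface now takes a byte start/length rather than PTE indices; the old index form is recoverable by shifting. A standalone sketch of the equivalence:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t start  = 0x10000;	/* byte-based interface (new) */
	uint64_t length = 0x4000;

	/* index-based interface (old), recovered by shifting */
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;

	printf("bytes [0x%llx + 0x%llx) == entries [%u, %u)\n",
	       (unsigned long long)start, (unsigned long long)length,
	       first_entry, first_entry + num_entries);
	return 0;
}
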

@@ -1109,8 +1337,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       false);
}

@@ -1124,8 +1352,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {

@@ -1186,10 +1414,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     uint64_t start,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_gtt_pte_t __iomem *gtt_entries =
		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;

@@ -1231,10 +1460,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     uint64_t start,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;

@@ -1266,11 +1496,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;

@@ -1290,11 +1522,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;

@@ -1327,10 +1561,12 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	intel_gtt_clear_range(first_entry, num_entries);
}

@@ -1351,7 +1587,6 @@ static void ggtt_bind_vma(struct i915_vma *vma,
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;

	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
	 * or we have a global mapping already but the cacheability flags have

@@ -1367,7 +1602,8 @@ static void ggtt_bind_vma(struct i915_vma *vma,
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
		if (!obj->has_global_gtt_mapping ||
		    (cache_level != obj->cache_level)) {
			vma->vm->insert_entries(vma->vm, obj->pages, entry,
			vma->vm->insert_entries(vma->vm, obj->pages,
						vma->node.start,
						cache_level);
			obj->has_global_gtt_mapping = 1;
		}

@@ -1378,7 +1614,9 @@ static void ggtt_bind_vma(struct i915_vma *vma,
	    (cache_level != obj->cache_level))) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->obj->pages, entry, cache_level);
					    vma->obj->pages,
					    vma->node.start,
					    cache_level);
		vma->obj->has_aliasing_ppgtt_mapping = 1;
	}
}

@@ -1388,11 +1626,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;

	if (obj->has_global_gtt_mapping) {
		vma->vm->clear_range(vma->vm, entry,
				     vma->obj->base.size >> PAGE_SHIFT,
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     obj->base.size,
				     true);
		obj->has_global_gtt_mapping = 0;
	}

@@ -1400,8 +1638,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
	if (obj->has_aliasing_ppgtt_mapping) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.clear_range(&appgtt->base,
					 entry,
					 obj->base.size >> PAGE_SHIFT,
					 vma->node.start,
					 obj->base.size,
					 true);
		obj->has_aliasing_ppgtt_mapping = 0;
	}

@@ -1486,14 +1724,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
}

void i915_gem_init_global_gtt(struct drm_device *dev)

@@ -1558,11 +1796,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
	if (bdw_gmch_ctl > 4) {
		WARN_ON(!i915.preliminary_hw_support);
		return 4<<20;
	}

	return bdw_gmch_ctl << 20;
}

@@ -304,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x :  %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	int i, j, page, offset, elt;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);

@@ -362,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x :  %08x\n", offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {

@@ -392,15 +429,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {

@@ -666,7 +695,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error)
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

@@ -676,9 +706,14 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
	 * synchronization commands which almost always appear in the case
	 * strictly a client bug. Use instdone to differentiate those some.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++)
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
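The ecode is simply IPEHR XOR INSTDONE of the first ring marked hung. A standalone restatement of that classification (register values below are invented examples):

#include <stdint.h>
#include <stdio.h>

struct ring_state { int hung; uint32_t ipehr, instdone; };

static uint32_t generate_code(const struct ring_state *rings, int n, int *ring_id)
{
	for (int i = 0; i < n; i++) {
		if (rings[i].hung) {
			if (ring_id)
				*ring_id = i;
			return rings[i].ipehr ^ rings[i].instdone;
		}
	}
	return 0;
}

int main(void)
{
	struct ring_state rings[2] = {
		{ 0, 0, 0 },
		{ 1, 0x0b160001, 0xfffffffe },	/* made-up register snapshots */
	};
	int ring_id = -1;
	uint32_t ecode = generate_code(rings, 2, &ring_id);

	printf("ring %d, ecode 0x%08x\n", ring_id, ecode);
	return 0;
}
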

@@ -716,87 +751,6 @@ static void i915_gem_record_fences(struct drm_device *dev,
	}
}

/* This assumes all batchbuffers are executed from the PPGTT. It might have to
 * change in the future. */
static bool is_active_vm(struct i915_address_space *vm,
			 struct intel_ring_buffer *ring)
{
	struct drm_device *dev = vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;

	if (INTEL_INFO(dev)->gen < 7)
		return i915_is_ggtt(vm);

	/* FIXME: This ignores that the global gtt vm is also on this list. */
	ppgtt = container_of(vm, struct i915_hw_ppgtt, base);

	if (INTEL_INFO(dev)->gen >= 8) {
		u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
		pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0));
		return pdp0 == ppgtt->pd_dma_addr[0];
	} else {
		u32 pp_db;
		pp_db = I915_READ(RING_PP_DIR_BASE(ring));
		return (pp_db >> 10) == ppgtt->pd_offset;
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct i915_address_space *vm;
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	bool found_active = false;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->scratch.obj;
		if (obj != NULL &&
		    acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_ggtt_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		if (!is_active_vm(vm, ring))
			continue;

		found_active = true;

		list_for_each_entry(vma, &vm->active_list, mm_list) {
			obj = vma->obj;
			if (obj->ring != ring)
				continue;

			if (i915_seqno_passed(seqno, obj->last_read_seqno))
				continue;

			if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
				continue;

			/* We need to copy these to an anonymous buffer as the simplest
			 * method to avoid being overwritten by userspace.
			 */
			return i915_error_object_create(dev_priv, obj, vm);
		}
	}

	WARN_ON(!found_active);
	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   struct drm_i915_error_ring *ering)

@@ -945,8 +899,39 @@ static void i915_gem_record_rings(struct drm_device *dev,

		i915_record_ring_state(dev, ring, &error->ring[i]);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);
		error->ring[i].pid = -1;
		request = i915_gem_find_active_request(ring);
		if (request) {
			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 request->ctx ?
							 request->ctx->vm :
							 &dev_priv->gtt.base);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
			    ring->scratch.obj)
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
								      ring->scratch.obj);

			if (request->file_priv) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->file_priv->file->pid,
						PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, ring->obj);

@@ -1113,6 +1098,40 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
	i915_get_extra_instdone(dev, error->extra_instdone);
}

static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:0x%08x", ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

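The chained printf calls above build a one-line summary. A standalone sketch producing the same shape of message (all sample values are invented, and plain snprintf stands in for the kernel's scnprintf):

#include <stdio.h>

int main(void)
{
	char msg[80];
	int ring_id = 0, pid = 1234, len;	/* invented sample values */

	len = snprintf(msg, sizeof(msg), "GPU HANG: ecode %d:0x%08x",
		       ring_id, 0x0b16fffeu);
	if (ring_id != -1 && pid != -1)
		len += snprintf(msg + len, sizeof(msg) - len,
				", in %s [%d]", "Xorg", pid);
	snprintf(msg + len, sizeof(msg) - len, ", reason: %s, action: %s",
		 "GT error interrupt", "continue");

	puts(msg);	/* GPU HANG: ecode 0:0x0b16fffe, in Xorg [1234], ... */
	return 0;
}
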
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device

@@ -1122,19 +1141,13 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev)
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	uint32_t ecode;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);

@@ -1143,30 +1156,22 @@ void i915_capture_error_state(struct drm_device *dev)
		return;
	}

	DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
		 dev->primary->index);
	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);
	ecode = i915_error_generate_code(dev_priv, error);

	if (!warned) {
		DRM_INFO("GPU HANG [%x]\n", ecode);
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		warned = true;
	}

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;

@@ -1174,8 +1179,19 @@ void i915_capture_error_state(struct drm_device *dev)
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,

@@ -387,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	assert_spin_locked(&dev_priv->irq_lock);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

@@ -415,7 +414,20 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	return ret;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
|
@ -482,7 +494,7 @@ done:
|
|||
}
|
||||
|
||||
|
||||
void
|
||||
static void
|
||||
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
u32 enable_mask, u32 status_mask)
|
||||
{
|
||||
|
@ -506,7 +518,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
|||
POSTING_READ(reg);
|
||||
}
|
||||
|
||||
void
|
||||
static void
|
||||
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
u32 enable_mask, u32 status_mask)
|
||||
{
|
||||
|
@ -1296,8 +1308,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
|
|||
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
|
||||
GT_BSD_CS_ERROR_INTERRUPT |
|
||||
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
|
||||
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
|
||||
i915_handle_error(dev, false);
|
||||
i915_handle_error(dev, false, "GT error interrupt 0x%08x",
|
||||
gt_iir);
|
||||
}
|
||||
|
||||
if (gt_iir & GT_PARITY_ERROR(dev))
|
||||
|
@ -1544,8 +1556,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
|
|||
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
|
||||
|
||||
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
|
||||
DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
|
||||
i915_handle_error(dev_priv->dev, false);
|
||||
i915_handle_error(dev_priv->dev, false,
|
||||
"VEBOX CS error interrupt 0x%08x",
|
||||
pm_iir);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1865,7 +1878,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
|
|||
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe i;
|
||||
enum pipe pipe;
|
||||
|
||||
if (de_iir & DE_ERR_INT_IVB)
|
||||
ivb_err_int_handler(dev);
|
||||
|
@ -1876,14 +1889,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
|
|||
if (de_iir & DE_GSE_IVB)
|
||||
intel_opregion_asle_intr(dev);
|
||||
|
||||
for_each_pipe(i) {
|
||||
if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
|
||||
drm_handle_vblank(dev, i);
|
||||
for_each_pipe(pipe) {
|
||||
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
|
||||
drm_handle_vblank(dev, pipe);
|
||||
|
||||
/* plane/pipes map 1:1 on ilk+ */
|
||||
if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
|
||||
intel_prepare_page_flip(dev, i);
|
||||
intel_finish_page_flip_plane(dev, i);
|
||||
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
|
||||
intel_prepare_page_flip(dev, pipe);
|
||||
intel_finish_page_flip_plane(dev, pipe);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2277,11 +2290,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
|
|||
* so userspace knows something bad happened (should trigger collection
|
||||
* of a ring dump etc.).
|
||||
*/
|
||||
void i915_handle_error(struct drm_device *dev, bool wedged)
|
||||
void i915_handle_error(struct drm_device *dev, bool wedged,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
va_list args;
|
||||
char error_msg[80];
|
||||
|
||||
i915_capture_error_state(dev);
|
||||
va_start(args, fmt);
|
||||
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
|
||||
va_end(args);
|
||||
|
||||
i915_capture_error_state(dev, wedged, error_msg);
|
||||
i915_report_and_clear_eir(dev);
|
||||
|
||||
if (wedged) {
|
||||
|
@ -2584,9 +2604,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
|
|||
*/
|
||||
tmp = I915_READ_CTL(ring);
|
||||
if (tmp & RING_WAIT) {
|
||||
DRM_ERROR("Kicking stuck wait on %s\n",
|
||||
ring->name);
|
||||
i915_handle_error(dev, false);
|
||||
i915_handle_error(dev, false,
|
||||
"Kicking stuck wait on %s",
|
||||
ring->name);
|
||||
I915_WRITE_CTL(ring, tmp);
|
||||
return HANGCHECK_KICK;
|
||||
}
|
||||
|
@ -2596,9 +2616,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
|
|||
default:
|
||||
return HANGCHECK_HUNG;
|
||||
case 1:
|
||||
DRM_ERROR("Kicking stuck semaphore on %s\n",
|
||||
ring->name);
|
||||
i915_handle_error(dev, false);
|
||||
i915_handle_error(dev, false,
|
||||
"Kicking stuck semaphore on %s",
|
||||
ring->name);
|
||||
I915_WRITE_CTL(ring, tmp);
|
||||
return HANGCHECK_KICK;
|
||||
case 0:
|
||||
|
@ -2720,7 +2740,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
|
|||
}
|
||||
|
||||
if (rings_hung)
|
||||
return i915_handle_error(dev, true);
|
||||
return i915_handle_error(dev, true, "Ring hung");
|
||||
|
||||
if (busy_count)
|
||||
/* Reset timer case chip hangs without another request
|
||||
|
@@ -3016,44 +3036,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}

static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;

pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;

I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));

pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;

i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;

I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
POSTING_READ(VLV_IER);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;

iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

dev_priv->irq_mask |= iir_mask;
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);

pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;

i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);

if (dev_priv->display_irqs_enabled)
return;

dev_priv->display_irqs_enabled = true;

if (dev_priv->dev->irq_enabled)
valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);

if (!dev_priv->display_irqs_enabled)
return;

dev_priv->display_irqs_enabled = false;

if (dev_priv->dev->irq_enabled)
valleyview_display_irqs_uninstall(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
unsigned long irqflags;

enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = (~enable_mask) |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev_priv->irq_mask = ~0;

I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);

I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(PIPESTAT(0), 0xffff);
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);

/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3184,6 +3273,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
int pipe;

if (!dev_priv)
@@ -3197,8 +3287,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);

spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

dev_priv->irq_mask = 0;

I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
@@ -3337,7 +3433,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);

for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3519,7 +3617,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);

for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3756,7 +3856,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);

for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);

@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = {
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 0,
};

module_param_named(modeset, i915.modeset, int, 0400);
@@ -157,3 +158,7 @@ MODULE_PARM_DESC(invert_brightness,

module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");

module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled, 0=disabled [default])");

@@ -174,6 +174,18 @@
#define VGA_CR_INDEX_CGA 0x3d4
#define VGA_CR_DATA_CGA 0x3d5

/*
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2

/*
* Memory interface instructions used by the kernel
*/
@@ -377,14 +389,30 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)

/* See the PUNIT HAS v0.8 for the below bits */
enum punit_power_well {
PUNIT_POWER_WELL_RENDER = 0,
PUNIT_POWER_WELL_MEDIA = 1,
PUNIT_POWER_WELL_DISP2D = 3,
PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,

PUNIT_POWER_WELL_NUM,
};

#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_CLK_GATE 1
#define PUNIT_PWR_RESET 2
#define PUNIT_PWR_GATE 3
#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))

#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
@@ -798,7 +826,12 @@
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)

#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
#define GEN7_GT_MODE 0x7008
#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)

#define GFX_MODE 0x02520
@@ -944,6 +977,9 @@
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)

#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)

#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1121,13 +1157,6 @@
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)

#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
#define HSW_BYPASS_FBC_QUEUE (1<<22)
#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)

/*
* GPIO regs
*/
@@ -4140,7 +4169,8 @@

#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define DPRS_MASK_VBLANK_SRD (1 << 0)
#define HSW_FBCQ_DIS (1 << 22)
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)

#define DISP_ARB_CTL 0x45000
@@ -4164,7 +4194,7 @@
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000

#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)

#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
@@ -4898,6 +4928,9 @@
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)

#define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)

#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -5043,6 +5076,10 @@
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)

#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)

#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)

@@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;

mipi = find_section(bdb, BDB_MIPI);
mipi = find_section(bdb, BDB_MIPI_CONFIG);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}

/* XXX: add more info */
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}

static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,

@@ -104,7 +104,8 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI 50
#define BDB_MIPI_CONFIG 52
#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */

struct bdb_general_features {
@@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10

/* MIPI DSI panel info */
struct bdb_mipi {
/* Block 52 contains MIPI Panel info
* 6 such enteries will there. Index into correct
* entery is based on the panel_index in #40 LFP
*/
#define MAX_MIPI_CONFIGURATIONS 6

#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1

struct mipi_config {
u16 panel_id;
u16 bridge_revision;

/* General params */
u32 dithering:1;
u32 bpp_pixel_format:1;
/* General Params */
u32 enable_dithering:1;
u32 rsvd1:1;
u32 dphy_valid:1;
u32 resvd2:28;
u32 is_bridge:1;

u16 port_info;
u16 rsvd3:2;
u16 num_lanes:2;
u16 rsvd4:12;
u32 panel_arch_type:2;
u32 is_cmd_mode:1;

/* DSI config */
u16 virt_ch_num:2;
u16 vtm:2;
u16 rsvd5:12;
#define NON_BURST_SYNC_PULSE 0x1
#define NON_BURST_SYNC_EVENTS 0x2
#define BURST_MODE 0x3
u32 video_transfer_mode:2;

u32 dsi_clock;
u32 cabc_supported:1;
u32 pwm_blc:1;

/* Bit 13:10 */
#define PIXEL_FORMAT_RGB565 0x1
#define PIXEL_FORMAT_RGB666 0x2
#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
#define PIXEL_FORMAT_RGB888 0x4
u32 videomode_color_format:4;

/* Bit 15:14 */
#define ENABLE_ROTATION_0 0x0
#define ENABLE_ROTATION_90 0x1
#define ENABLE_ROTATION_180 0x2
#define ENABLE_ROTATION_270 0x3
u32 rotation:2;
u32 bta_enabled:1;
u32 rsvd2:15;

/* 2 byte Port Description */
#define DUAL_LINK_NOT_SUPPORTED 0
#define DUAL_LINK_FRONT_BACK 1
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
u16 rsvd3:12;

u16 rsvd4;

u8 rsvd5[5];
u32 dsi_ddr_clk;
u32 bridge_ref_clk;
u16 rsvd_pwr;

/* Dphy Params */
u32 prepare_cnt:5;
u32 rsvd6:3;
#define BYTE_CLK_SEL_20MHZ 0
#define BYTE_CLK_SEL_10MHZ 1
#define BYTE_CLK_SEL_5MHZ 2
u8 byte_clk_sel:2;

u8 rsvd6:6;

/* DPHY Flags */
u16 dphy_param_valid:1;
u16 eot_pkt_disabled:1;
u16 enable_clk_stop:1;
u16 rsvd7:13;

u32 hs_tx_timeout;
u32 lp_rx_timeout;
u32 turn_around_timeout;
u32 device_reset_timer;
u32 master_init_timer;
u32 dbi_bw_timer;
u32 lp_byte_clk_val;

/* 4 byte Dphy Params */
u32 prepare_cnt:6;
u32 rsvd8:2;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd7:3;
u32 rsvd9:3;
u32 exit_zero_cnt:6;
u32 rsvd8:2;
u32 rsvd10:2;

u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
u32 hl_switch_cnt;

u32 rsvd11[6];

/* timings based on dphy spec */
u8 tclk_miss;
u8 tclk_post;
u8 rsvd12;
u8 tclk_pre;
u8 tclk_prepare;
u8 tclk_settle;
u8 tclk_term_enable;
u8 tclk_trail;
u16 tclk_prepare_clkzero;
u8 rsvd13;
u8 td_term_enable;
u8 teot;
u8 ths_exit;
u8 ths_prepare;
u16 ths_prepare_hszero;
u8 rsvd14;
u8 ths_settle;
u8 ths_skip;
u8 ths_trail;
u8 tinit;
u8 tlpx;
u8 rsvd15[3];

/* GPIOs */
u8 panel_enable;
u8 bl_enable;
u8 pwm_enable;
u8 reset_r_n;
u8 pwr_down_r;
u8 stdby_r_n;

} __packed;

/* Block 52 contains MIPI configuration block
* 6 * bdb_mipi_config, followed by 6 pps data
* block below
*
* all delays has a unit of 100us
*/
struct mipi_pps_data {
u16 panel_on_delay;
u16 bl_enable_delay;
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
};

struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
};

/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
* can be maximum of 6 blocks
*/
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
};

#endif /* _I830_BIOS_H_ */

@@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;

power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;

tmp = I915_READ(crt->adpa_reg);

if (!(tmp & ADPA_DAC_ENABLE))
@@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;

/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev))
pipe_config->port_clock = 135000 * 2;

return true;
}

@@ -630,14 +639,22 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;

intel_runtime_pm_get(dev_priv);

DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
force);

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);

if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
@@ -645,23 +662,30 @@ intel_crt_detect(struct drm_connector *connector, bool force)
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
status = connector_status_connected;
goto out;
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
}

if (intel_crt_detect_ddc(connector))
return connector_status_connected;
if (intel_crt_detect_ddc(connector)) {
status = connector_status_connected;
goto out;
}

/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
if (I915_HAS_HOTPLUG(dev))
return connector_status_disconnected;
if (I915_HAS_HOTPLUG(dev)) {
status = connector_status_disconnected;
goto out;
}

if (!force)
return connector->status;
if (!force) {
status = connector->status;
goto out;
}

/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
@@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
} else
status = connector_status_unknown;

out:
intel_display_power_put(dev_priv, power_domain);
intel_runtime_pm_put(dev_priv);

return status;
}

@@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);

i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
goto out;

/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
return intel_crt_ddc_get_modes(connector, i2c);
ret = intel_crt_ddc_get_modes(connector, i2c);

out:
intel_display_power_put(dev_priv, power_domain);

return ret;
}

static int intel_crt_set_property(struct drm_connector *connector,

@@ -1145,9 +1145,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
int i;

power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;

tmp = I915_READ(DDI_BUF_CTL(port));

if (!(tmp & DDI_BUF_CTL_ENABLE))

@@ -1122,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;

if (!intel_display_power_enabled(dev_priv->dev,
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
@@ -1188,16 +1188,16 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
int reg, sprite;
u32 val;

if (IS_VALLEYVIEW(dev)) {
for (i = 0; i < INTEL_INFO(dev)->num_sprites; i++) {
reg = SPCNTR(pipe, i);
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
WARN((val & SP_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, i), pipe_name(pipe));
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
@@ -2321,6 +2321,25 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}

static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;

if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;

spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);

return pending;
}

static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
@@ -2331,6 +2350,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb;
int ret;

if (intel_crtc_has_pending_flip(crtc)) {
DRM_ERROR("pipe is still busy with an old pageflip\n");
return -EBUSY;
}

/* no fb bound */
if (!fb) {
DRM_ERROR("No FB bound\n");
@@ -2956,25 +2980,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}

static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;

if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;

spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);

return pending;
}

bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
struct intel_crtc *crtc;
@@ -3953,6 +3958,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))

enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_digital_port *intel_dig_port;

switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
WARN_ON_ONCE(!HAS_DDI(dev));
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
switch (intel_dig_port->port) {
case PORT_A:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
case PORT_C:
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
default:
WARN_ON_ONCE(1);
return POWER_DOMAIN_PORT_OTHER;
}
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
return POWER_DOMAIN_PORT_DSI;
default:
return POWER_DOMAIN_PORT_OTHER;
}
}

static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;

transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

for_each_encoder_on_crtc(dev, crtc, intel_encoder)
mask |= BIT(intel_display_port_power_domain(intel_encoder));

return mask;
}

void intel_display_set_init_power(struct drm_i915_private *dev_priv,
bool enable)
{
if (dev_priv->power_domains.init_power_on == enable)
return;

if (enable)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

dev_priv->power_domains.init_power_on = enable;
}

static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;

/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;

if (!crtc->base.enabled)
continue;

pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev_priv, domain);
}

list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;

for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev_priv, domain);

crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}

intel_display_set_init_power(dev_priv, false);
}

int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4113,6 +4229,7 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)

if (req_cdclk != cur_cdclk)
valleyview_set_cdclk(dev, req_cdclk);
modeset_update_crtc_power_domains(dev);
}

static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -5495,6 +5612,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;

if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;

pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;

@@ -6156,7 +6277,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
return bps / (link_bw * 8) + 1;
return DIV_ROUND_UP(bps, link_bw * 8);
}

static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
@@ -6812,105 +6933,9 @@ done:
mutex_unlock(&dev_priv->pc8.lock);
}

static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;

mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
__hsw_enable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}

static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;

mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
__hsw_disable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}

#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))

static unsigned long get_pipe_power_domains(struct drm_device *dev,
enum pipe pipe, bool pfit_enabled)
{
unsigned long mask;
enum transcoder transcoder;

transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

return mask;
}

void intel_display_set_init_power(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (dev_priv->power_domains.init_power_on == enable)
return;

if (enable)
intel_display_power_get(dev, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev, POWER_DOMAIN_INIT);

dev_priv->power_domains.init_power_on = enable;
}

static void modeset_update_power_wells(struct drm_device *dev)
{
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;

/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;

if (!crtc->base.enabled)
continue;

pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
crtc->pipe,
crtc->config.pch_pfit.enabled);

for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev, domain);
}

list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;

for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev, domain);

crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}

intel_display_set_init_power(dev, false);
}

static void haswell_modeset_global_resources(struct drm_device *dev)
{
modeset_update_power_wells(dev);
modeset_update_crtc_power_domains(dev);
hsw_update_package_c8(dev);
}

@@ -6961,6 +6986,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;

if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;

pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;

@@ -6986,7 +7015,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}

if (!intel_display_power_enabled(dev,
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;

@@ -7014,7 +7043,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);

pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_enabled(dev, pfit_domain))
if (intel_display_power_enabled(dev_priv, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);

if (IS_HASWELL(dev))
@@ -7549,7 +7578,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT;

if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
DRM_DEBUG_KMS("buffer is to small\n");
ret = -ENOMEM;
goto fail;
}
@@ -7560,7 +7589,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
unsigned alignment;

if (obj->tiling_mode) {
DRM_ERROR("cursor cannot be tiled\n");
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
@@ -7576,13 +7605,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,

ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
goto fail_locked;
}

ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_ERROR("failed to release fence for cursor");
DRM_DEBUG_KMS("failed to release fence for cursor");
goto fail_unpin;
}

@@ -7593,7 +7622,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
@@ -7692,7 +7721,7 @@ err:
return ERR_PTR(ret);
}

struct drm_framebuffer *
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
@@ -8192,8 +8221,12 @@ void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

hsw_package_c8_gpu_busy(dev_priv);
if (dev_priv->mm.busy)
return;

hsw_disable_package_c8(dev_priv);
i915_update_gfx_val(dev_priv);
dev_priv->mm.busy = true;
}

void intel_mark_idle(struct drm_device *dev)
@@ -8201,10 +8234,13 @@ void intel_mark_idle(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;

hsw_package_c8_gpu_idle(dev_priv);
if (!dev_priv->mm.busy)
return;

dev_priv->mm.busy = false;

if (!i915.powersave)
return;
goto out;

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -8215,6 +8251,9 @@ void intel_mark_idle(struct drm_device *dev)

if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);

out:
hsw_enable_package_c8(dev_priv);
}

void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -8678,6 +8717,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;

if (i915_terminally_wedged(&dev_priv->gpu_error))
goto out_hang;

work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -8752,6 +8794,13 @@ cleanup:
free_work:
kfree(work);

if (ret == -EIO) {
out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
drm_send_vblank_event(dev, intel_crtc->pipe, event);
}
return ret;
}

@@ -10555,10 +10604,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};

int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int aligned_height;
int pitch_limit;
@@ -10996,7 +11045,8 @@ void intel_modeset_suspend_hw(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, j, ret;
int sprite, ret;
enum pipe pipe;

drm_mode_config_init(dev);

@@ -11033,13 +11083,13 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < INTEL_INFO(dev)->num_sprites; j++) {
ret = intel_plane_init(dev, i, j);
for_each_pipe(pipe) {
intel_crtc_init(dev, pipe);
for_each_sprite(pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
pipe_name(i), sprite_name(i, j), ret);
pipe_name(pipe), sprite_name(pipe, sprite), ret);
}
}

@@ -11056,7 +11106,9 @@ void intel_modeset_init(struct drm_device *dev)
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);

mutex_lock(&dev->mode_config.mutex);
intel_modeset_setup_hw_state(dev, false);
mutex_unlock(&dev->mode_config.mutex);
}

static void
@@ -11239,11 +11291,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}

void i915_redisable_vga(struct drm_device *dev)
void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);

if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
}

void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -11251,14 +11313,10 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
return;

if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
i915_redisable_vga_power_on(dev);
}

static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -11602,7 +11660,8 @@ intel_display_capture_error_state(struct drm_device *dev)

for_each_pipe(i) {
error->pipe[i].power_domain_on =
intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
intel_display_power_enabled_sw(dev_priv,
POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;

@@ -11640,7 +11699,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];

error->transcoder[i].power_domain_on =
intel_display_power_enabled_sw(dev,
intel_display_power_enabled_sw(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;

@@ -916,8 +916,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);

for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1333,7 +1333,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
EDP_BLC_ENABLE);

pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

@@ -1485,7 +1486,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ(intel_dp->output_reg);
enum intel_display_power_domain power_domain;
u32 tmp;

power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;

tmp = I915_READ(intel_dp->output_reg);

if (!(tmp & DP_PORT_EN))
return false;
@@ -1868,9 +1876,11 @@ static void intel_disable_dp(struct intel_encoder *encoder)

/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
edp_panel_vdd_off(intel_dp, true);

/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -3224,10 +3234,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;

intel_runtime_pm_get(dev_priv);

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);

DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));

@@ -3258,21 +3272,32 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;

out:
intel_display_power_put(dev_priv, power_domain);

intel_runtime_pm_put(dev_priv);

return status;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;

/* We should parse the EDID data and find out if it has an audio sink
*/

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);

ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;

@@ -3293,15 +3318,25 @@ static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);

edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}

intel_display_power_put(dev_priv, power_domain);

return has_audio;
}

@@ -609,6 +609,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
@@ -732,7 +734,9 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_device *dev, bool enable);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
@@ -871,18 +875,17 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
int intel_power_domains_init(struct drm_device *dev);
void intel_power_domains_remove(struct drm_device *dev);
bool intel_display_power_enabled(struct drm_device *dev,
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_enabled_sw(struct drm_device *dev,
bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_device *dev,
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_device *dev,
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_power_domains_init_hw(struct drm_device *dev);
void intel_set_power_well(struct drm_device *dev, bool enable);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);

@@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    enum intel_display_power_domain power_domain;
    u32 port, func;
    enum pipe p;

    DRM_DEBUG_KMS("\n");

    power_domain = intel_display_port_power_domain(encoder);
    if (!intel_display_power_enabled(dev_priv, power_domain))
        return false;

    /* XXX: this only works for one DSI output */
    for (p = PIPE_A; p <= PIPE_B; p++) {
        port = I915_READ(MIPI_PORT_CTRL(p));

@@ -488,8 +493,19 @@ static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
    struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
    struct intel_encoder *intel_encoder = &intel_dsi->base;
    enum intel_display_power_domain power_domain;
    enum drm_connector_status connector_status;
    struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;

    DRM_DEBUG_KMS("\n");
    return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
    power_domain = intel_display_port_power_domain(intel_encoder);

    intel_display_power_get(dev_priv, power_domain);
    connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
    intel_display_power_put(dev_priv, power_domain);

    return connector_status;
}

static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -289,7 +289,27 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
    struct drm_device *dev = fb_helper->dev;
    int i, j;
    bool *save_enabled;
    bool any_enabled = false;
    bool fallback = true;
    int num_connectors_enabled = 0;
    int num_connectors_detected = 0;

    /*
     * If the user specified any force options, just bail here
     * and use that config.
     */
    for (i = 0; i < fb_helper->connector_count; i++) {
        struct drm_fb_helper_connector *fb_conn;
        struct drm_connector *connector;

        fb_conn = fb_helper->connector_info[i];
        connector = fb_conn->connector;

        if (!enabled[i])
            continue;

        if (connector->force != DRM_FORCE_UNSPECIFIED)
            return false;
    }

    save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
                           GFP_KERNEL);

@@ -306,6 +326,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,

        fb_conn = fb_helper->connector_info[i];
        connector = fb_conn->connector;

        if (connector->status == connector_status_connected)
            num_connectors_detected++;

        if (!enabled[i]) {
            DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
                          connector->base.id);

@@ -320,6 +344,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
            continue;
        }

        num_connectors_enabled++;

        new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);

        /*

@@ -329,7 +355,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
         */
        for (j = 0; j < fb_helper->connector_count; j++) {
            if (crtcs[j] == new_crtc) {
                any_enabled = false;
                DRM_DEBUG_KMS("fallback: cloned configuration\n");
                fallback = true;
                goto out;
            }
        }

@@ -372,11 +399,25 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                      encoder->crtc->base.id,
                      modes[i]->name);

        any_enabled = true;
        fallback = false;
    }

    /*
     * If the BIOS didn't enable everything it could, fall back to have the
     * same user experience of lighting up as much as possible like the
     * fbdev helper library.
     */
    if (num_connectors_enabled != num_connectors_detected &&
        num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
        DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
        DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
                      num_connectors_detected);
        fallback = true;
    }

out:
    if (!any_enabled) {
    if (fallback) {
        DRM_DEBUG_KMS("Not using firmware configuration\n");
        memcpy(enabled, save_enabled, dev->mode_config.num_connector);
        kfree(save_enabled);
        return false;
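These fbdev hunks replace the old any_enabled flag with an explicit fallback decision: if the BIOS lit fewer connectors than were detected while pipes are still free, the inherited configuration is discarded. A hedged sketch of just that heuristic, with invented values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Keep the BIOS-inherited config? Mirrors the check above in isolation. */
    static bool use_inherited_config(int enabled, int detected, int num_pipes)
    {
        if (enabled != detected && enabled < num_pipes)
            return false;   /* fallback: not all outputs enabled */
        return true;
    }

    int main(void)
    {
        /* 1 output enabled, 2 detected, 3 pipes available -> fall back */
        printf("%d\n", use_inherited_config(1, 2, 3));
        return 0;
    }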
@@ -667,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
    struct drm_device *dev = encoder->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
    enum intel_display_power_domain power_domain;
    u32 tmp;

    power_domain = intel_display_port_power_domain(encoder);
    if (!intel_display_power_enabled(dev_priv, power_domain))
        return false;

    tmp = I915_READ(intel_hdmi->hdmi_reg);

    if (!(tmp & SDVO_ENABLE))

@@ -909,11 +914,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
    struct intel_encoder *intel_encoder = &intel_dig_port->base;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct edid *edid;
    enum intel_display_power_domain power_domain;
    enum drm_connector_status status = connector_status_disconnected;

    DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                  connector->base.id, drm_get_connector_name(connector));

    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);

    intel_hdmi->has_hdmi_sink = false;
    intel_hdmi->has_audio = false;
    intel_hdmi->rgb_quant_range_selectable = false;

@@ -941,31 +950,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        intel_encoder->type = INTEL_OUTPUT_HDMI;
    }

    intel_display_power_put(dev_priv, power_domain);

    return status;
}

static int intel_hdmi_get_modes(struct drm_connector *connector)
{
    struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
    struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    enum intel_display_power_domain power_domain;
    int ret;

    /* We should parse the EDID data and find out if it's an HDMI sink so
     * we can send audio to it.
     */

    return intel_ddc_get_modes(connector,
    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);

    ret = intel_ddc_get_modes(connector,
                              intel_gmbus_get_adapter(dev_priv,
                                                      intel_hdmi->ddc_bus));

    intel_display_power_put(dev_priv, power_domain);

    return ret;
}

static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
    struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
    struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
    struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    enum intel_display_power_domain power_domain;
    struct edid *edid;
    bool has_audio = false;

    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);

    edid = drm_get_edid(connector,
                        intel_gmbus_get_adapter(dev_priv,
                                                intel_hdmi->ddc_bus));

@@ -975,6 +1001,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
        kfree(edid);
    }

    intel_display_power_put(dev_priv, power_domain);

    return has_audio;
}
@@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
    mutex_lock(&dev->struct_mutex);

    if (new_bo->tiling_mode) {
        DRM_ERROR("buffer used for overlay image can not be tiled\n");
        DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
        ret = -EINVAL;
        goto out_unlock;
    }
@@ -294,11 +294,14 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)

    if (IS_IVYBRIDGE(dev)) {
        /* WaFbcAsynchFlipDisableFbcQueue:ivb */
        I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
                   ILK_FBCQ_DIS);
    } else {
        /* WaFbcAsynchFlipDisableFbcQueue:hsw */
        I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
                   HSW_BYPASS_FBC_QUEUE);
        /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
        I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
                   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
                   HSW_FBCQ_DIS);
    }

    I915_WRITE(SNB_DPFC_CTL_SA,

@@ -540,7 +543,7 @@ void intel_update_fbc(struct drm_device *dev)
        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
        goto out_disable;
    }
    if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
    if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
        intel_crtc->plane != PLANE_A) {
        if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
            DRM_DEBUG_KMS("plane not A, disabling compression\n");

@@ -1131,7 +1134,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        *plane_wm = display->max_wm;

    /* Use the large buffer method to calculate cursor watermark */
    line_time_us = ((htotal * 1000) / clock);
    line_time_us = max(htotal * 1000 / clock, 1);
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
    entries = line_count * 64 * pixel_size;
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;

@@ -1207,7 +1210,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
    hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    line_time_us = (htotal * 1000) / clock;
    line_time_us = max(htotal * 1000 / clock, 1);
    line_count = (latency_ns / line_time_us + 1000) / 1000;
    line_size = hdisplay * pixel_size;

@@ -1440,7 +1443,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
        unsigned long line_time_us;
        int entries;

        line_time_us = ((htotal * 1000) / clock);
        line_time_us = max(htotal * 1000 / clock, 1);

        /* Use ns/us then divide to preserve precision */
        entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *

@@ -1566,7 +1569,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        unsigned long line_time_us;
        int entries;

        line_time_us = (htotal * 1000) / clock;
        line_time_us = max(htotal * 1000 / clock, 1);

        /* Use ns/us then divide to preserve precision */
        entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
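Each watermark hunk above replaces a bare division with max(htotal * 1000 / clock, 1): when the pixel clock exceeds htotal * 1000 the integer division truncates to zero and the subsequent latency division would divide by zero (the "avoid div by zero when pixel clock is large" fix from the series). A minimal sketch of the guard, with illustrative numbers:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int htotal = 4400, clock = 5000000;     /* large pixel clock, in kHz */
        unsigned long line_time_us;

        /* Unclamped, 4400 * 1000 / 5000000 truncates to 0. */
        line_time_us = max(htotal * 1000 / clock, 1);
        printf("line time: %lu us, line count: %d\n",
               line_time_us, (int)((700 / line_time_us + 1000) / 1000));
        return 0;
    }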
@@ -4661,6 +4664,17 @@ static void gen6_init_clock_gating(struct drm_device *dev)
    I915_WRITE(GEN6_GT_MODE,
               _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

    /*
     * BSpec recommends 8x4 when MSAA is used,
     * however in practice 16x4 seems fastest.
     *
     * Note that PS/WM thread counts depend on the WIZ hashing
     * disable bit, which we don't touch here, but it's good
     * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
     */
    I915_WRITE(GEN6_GT_MODE,
               GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

    ilk_init_lp_watermarks(dev);

    I915_WRITE(CACHE_MODE_0,

@@ -4688,9 +4702,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
               GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
               GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

    /* Bspec says we need to always set all mask bits. */
    I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
               _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
    /* WaStripsFansDisableFastClipPerformanceFix:snb */
    I915_WRITE(_3D_CHICKEN3,
               _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

    /*
     * Bspec says:

@@ -4724,11 +4738,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)

    g4x_disable_trickle_feed(dev);

    /* The default value should be 0x200 according to docs, but the two
     * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
    I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
    I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));

    cpt_init_clock_gating(dev);

    gen6_check_mch_setup(dev);
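The GT_MODE and _3D_CHICKEN3 writes above rely on masked registers: the high 16 bits select which of the low 16 bits the hardware actually latches, so independent workarounds can flip single bits without a read-modify-write. A sketch of that encoding; the macro names mirror the ones used here, but the register model is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a)  ((a) << 16)

    /* Model of a masked register: only bits whose mask half is set change. */
    static uint32_t masked_write(uint32_t reg, uint32_t val)
    {
        uint32_t mask = val >> 16;

        return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint32_t reg = 0x0000;

        reg = masked_write(reg, _MASKED_BIT_ENABLE(0x4));   /* sets bit 2 */
        reg = masked_write(reg, _MASKED_BIT_DISABLE(0x1));  /* clears bit 0 only */
        printf("reg = 0x%04x\n", reg);  /* 0x0004: bit 2 left untouched */
        return 0;
    }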
@@ -4786,7 +4795,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
static void gen8_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    enum pipe i;
    enum pipe pipe;

    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);

@@ -4795,6 +4804,15 @@ static void gen8_init_clock_gating(struct drm_device *dev)
    /* FIXME(BDW): Check all the w/a, some might only apply to
     * pre-production hw. */

    /* WaDisablePartialInstShootdown:bdw */
    I915_WRITE(GEN8_ROW_CHICKEN,
               _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

    /* WaDisableThreadStallDopClockGating:bdw */
    /* FIXME: Unclear whether we really need this on production bdw. */
    I915_WRITE(GEN8_ROW_CHICKEN,
               _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

    /*
     * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
     * pre-production hardware

@@ -4822,10 +4840,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
               I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

    /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
    for_each_pipe(i) {
        I915_WRITE(CHICKEN_PIPESL_1(i),
                   I915_READ(CHICKEN_PIPESL_1(i) |
                             DPRS_MASK_VBLANK_SRD));
    for_each_pipe(pipe) {
        I915_WRITE(CHICKEN_PIPESL_1(pipe),
                   I915_READ(CHICKEN_PIPESL_1(pipe)) |
                   BDW_DPRS_MASK_VBLANK_SRD);
    }

    /* Use Force Non-Coherent whenever executing a 3D context. This is a

@@ -4841,6 +4859,24 @@ static void gen8_init_clock_gating(struct drm_device *dev)
    I915_WRITE(GEN7_FF_THREAD_MODE,
               I915_READ(GEN7_FF_THREAD_MODE) &
               ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

    /*
     * BSpec recommends 8x4 when MSAA is used,
     * however in practice 16x4 seems fastest.
     *
     * Note that PS/WM thread counts depend on the WIZ hashing
     * disable bit, which we don't touch here, but it's good
     * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
     */
    I915_WRITE(GEN7_GT_MODE,
               GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

    I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
               _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

    /* WaDisableSDEUnitClockGating:bdw */
    I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
               GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
}

static void haswell_init_clock_gating(struct drm_device *dev)

@@ -4871,6 +4907,17 @@ static void haswell_init_clock_gating(struct drm_device *dev)
    I915_WRITE(CACHE_MODE_1,
               _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

    /*
     * BSpec recommends 8x4 when MSAA is used,
     * however in practice 16x4 seems fastest.
     *
     * Note that PS/WM thread counts depend on the WIZ hashing
     * disable bit, which we don't touch here, but it's good
     * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
     */
    I915_WRITE(GEN7_GT_MODE,
               GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

    /* WaSwitchSolVfFArbitrationPriority:hsw */
    I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

@@ -4944,14 +4991,27 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)

    gen7_setup_fixed_func_scheduler(dev_priv);

    /* enable HiZ Raw Stall Optimization */
    I915_WRITE(CACHE_MODE_0_GEN7,
               _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
    if (0) { /* causes HiZ corruption on ivb:gt1 */
        /* enable HiZ Raw Stall Optimization */
        I915_WRITE(CACHE_MODE_0_GEN7,
                   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
    }

    /* WaDisable4x2SubspanOptimization:ivb */
    I915_WRITE(CACHE_MODE_1,
               _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

    /*
     * BSpec recommends 8x4 when MSAA is used,
     * however in practice 16x4 seems fastest.
     *
     * Note that PS/WM thread counts depend on the WIZ hashing
     * disable bit, which we don't touch here, but it's good
     * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
     */
    I915_WRITE(GEN7_GT_MODE,
               GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

    snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
    snpcr &= ~GEN6_MBC_SNPCR_MASK;
    snpcr |= GEN6_MBC_SNPCR_MED;

@@ -5004,9 +5064,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
               _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
                                  GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

    /* WaDisableL3CacheAging:vlv */
    I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);

    /* WaForceL3Serialization:vlv */
    I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
               ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -5167,19 +5224,16 @@ void intel_suspend_hw(struct drm_device *dev)
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_device *dev,
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    return I915_READ(HSW_PWR_WELL_DRIVER) ==
           (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

bool intel_display_power_enabled_sw(struct drm_device *dev,
bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_power_domains *power_domains;

    power_domains = &dev_priv->power_domains;

@@ -5187,10 +5241,9 @@ bool intel_display_power_enabled_sw(struct drm_device *dev,
    return power_domains->domain_use_count[domain];
}

bool intel_display_power_enabled(struct drm_device *dev,
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_power_domains *power_domains;
    struct i915_power_well *power_well;
    bool is_enabled;

@@ -5205,7 +5258,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
        if (power_well->always_on)
            continue;

        if (!power_well->is_enabled(dev, power_well)) {
        if (!power_well->ops->is_enabled(dev_priv, power_well)) {
            is_enabled = false;
            break;
        }

@@ -5215,6 +5268,12 @@ bool intel_display_power_enabled(struct drm_device *dev,
    return is_enabled;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
    struct drm_device *dev = dev_priv->dev;

@@ -5251,10 +5310,17 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
    }
}

static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
{
    assert_spin_locked(&dev->vbl_lock);

    dev->vblank[pipe].last = 0;
}

static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
    struct drm_device *dev = dev_priv->dev;
    enum pipe p;
    enum pipe pipe;
    unsigned long irqflags;

    /*

@@ -5265,16 +5331,15 @@ static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
     * FIXME: Should we do this in general in drm_vblank_post_modeset?
     */
    spin_lock_irqsave(&dev->vbl_lock, irqflags);
    for_each_pipe(p)
        if (p != PIPE_A)
            dev->vblank[p].last = 0;
    for_each_pipe(pipe)
        if (pipe != PIPE_A)
            reset_vblank_counter(dev, pipe);
    spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}

static void hsw_set_power_well(struct drm_device *dev,
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    bool is_enabled, enable_requested;
    uint32_t tmp;
@@ -5308,35 +5373,204 @@ static void hsw_set_power_well(struct drm_device *dev,
    }
}

static void __intel_power_well_get(struct drm_device *dev,
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

    if (!power_well->count++ && power_well->set) {
        hsw_disable_package_c8(dev_priv);
        power_well->set(dev, power_well, true);
    }
    /*
     * We're taking over the BIOS, so clear any requests made by it since
     * the driver is in charge now.
     */
    if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
        I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void __intel_power_well_put(struct drm_device *dev,
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
    hsw_disable_package_c8(dev_priv);
    hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    WARN_ON(!power_well->count);

    if (!--power_well->count && power_well->set &&
        i915.disable_power_well) {
        power_well->set(dev, power_well, false);
        hsw_enable_package_c8(dev_priv);
    }
    hsw_set_power_well(dev_priv, power_well, false);
    hsw_enable_package_c8(dev_priv);
}

void intel_display_power_get(struct drm_device *dev,
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                              struct i915_power_well *power_well)
{
    return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
    enum punit_power_well power_well_id = power_well->data;
    u32 mask;
    u32 state;
    u32 ctrl;

    mask = PUNIT_PWRGT_MASK(power_well_id);
    state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
                     PUNIT_PWRGT_PWR_GATE(power_well_id);

    mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
    ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

    if (COND)
        goto out;

    ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
    ctrl &= ~mask;
    ctrl |= state;
    vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

    if (wait_for(COND, 100))
        DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                  state,
                  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
    mutex_unlock(&dev_priv->rps.hw_lock);
}
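vlv_set_power_well above writes the requested state to the punit and then busy-waits on the COND macro until the status register reflects it, erroring out after 100 ms. A self-contained sketch of that wait_for() idiom; the timing source and the never-updating status are illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static unsigned int hw_status;  /* stands in for the punit status register */

    /* Never satisfied here, so the sketch exercises the timeout path. */
    static bool condition_met(unsigned int want)
    {
        return hw_status == want;
    }

    /* Poll cond up to timeout_ms; return false on timeout, like wait_for(). */
    static bool wait_for(bool (*cond)(unsigned int), unsigned int want,
                         int timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond(want))
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                return false;
        }
    }

    int main(void)
    {
        if (!wait_for(condition_met, 1, 100))
            fprintf(stderr, "timeout setting power well state\n");
        return 0;
    }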
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
    vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    int power_well_id = power_well->data;
    bool enabled = false;
    u32 mask;
    u32 state;
    u32 ctrl;

    mask = PUNIT_PWRGT_MASK(power_well_id);
    ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

    mutex_lock(&dev_priv->rps.hw_lock);

    state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
    /*
     * We only ever set the power-on and power-gate states, anything
     * else is unexpected.
     */
    WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
            state != PUNIT_PWRGT_PWR_GATE(power_well_id));
    if (state == ctrl)
        enabled = true;

    /*
     * A transient state at this point would mean some unexpected party
     * is poking at the power controls too.
     */
    ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
    WARN_ON(ctrl != state);

    mutex_unlock(&dev_priv->rps.hw_lock);

    return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
    WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

    vlv_set_power_well(dev_priv, power_well, true);

    spin_lock_irq(&dev_priv->irq_lock);
    valleyview_enable_display_irqs(dev_priv);
    spin_unlock_irq(&dev_priv->irq_lock);

    /*
     * During driver initialization we need to defer enabling hotplug
     * processing until fbdev is set up.
     */
    if (dev_priv->enable_hotplug_processing)
        intel_hpd_init(dev_priv->dev);

    i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
    struct drm_device *dev = dev_priv->dev;
    enum pipe pipe;

    WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

    spin_lock_irq(&dev_priv->irq_lock);
    for_each_pipe(pipe)
        __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

    valleyview_disable_display_irqs(dev_priv);
    spin_unlock_irq(&dev_priv->irq_lock);

    spin_lock_irq(&dev->vbl_lock);
    for_each_pipe(pipe)
        reset_vblank_counter(dev, pipe);
    spin_unlock_irq(&dev->vbl_lock);

    vlv_set_power_well(dev_priv, power_well, false);
}

static void check_power_well_state(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
    bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

    if (power_well->always_on || !i915.disable_power_well) {
        if (!enabled)
            goto mismatch;

        return;
    }

    if (enabled != (power_well->count > 0))
        goto mismatch;

    return;

mismatch:
    WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
         power_well->name, power_well->always_on, enabled,
         power_well->count, i915.disable_power_well);
}
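intel_display_power_get/put below walk every well backing a domain and only invoke the enable/disable hooks on the 0 -> 1 and 1 -> 0 transitions, so a well shared by several domains stays up while anyone still needs it. A reduced sketch of that refcounting, outside the driver:

    #include <stdio.h>

    struct power_well {
        const char *name;
        int count;
    };

    static void well_get(struct power_well *w)
    {
        if (!w->count++)
            printf("enabling %s\n", w->name);   /* hook fires on 0 -> 1 */
    }

    static void well_put(struct power_well *w)
    {
        if (!--w->count)
            printf("disabling %s\n", w->name);  /* hook fires on 1 -> 0 */
    }

    int main(void)
    {
        struct power_well display = { "display", 0 };

        well_get(&display); /* first user powers it up */
        well_get(&display); /* second user: no hardware access */
        well_put(&display); /* still one user: stays up */
        well_put(&display); /* last user powers it down */
        return 0;
    }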
void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_power_domains *power_domains;
    struct i915_power_well *power_well;
    int i;

@@ -5345,18 +5579,23 @@ void intel_display_power_get(struct drm_device *dev,

    mutex_lock(&power_domains->lock);

    for_each_power_well(i, power_well, BIT(domain), power_domains)
        __intel_power_well_get(dev, power_well);
    for_each_power_well(i, power_well, BIT(domain), power_domains) {
        if (!power_well->count++) {
            DRM_DEBUG_KMS("enabling %s\n", power_well->name);
            power_well->ops->enable(dev_priv, power_well);
        }

        check_power_well_state(dev_priv, power_well);
    }

    power_domains->domain_use_count[domain]++;

    mutex_unlock(&power_domains->lock);
}

void intel_display_power_put(struct drm_device *dev,
void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_power_domains *power_domains;
    struct i915_power_well *power_well;
    int i;

@@ -5368,8 +5607,16 @@ void intel_display_power_put(struct drm_device *dev,
    WARN_ON(!power_domains->domain_use_count[domain]);
    power_domains->domain_use_count[domain]--;

    for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
        __intel_power_well_put(dev, power_well);
    for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
        WARN_ON(!power_well->count);

        if (!--power_well->count && i915.disable_power_well) {
            DRM_DEBUG_KMS("disabling %s\n", power_well->name);
            power_well->ops->disable(dev_priv, power_well);
        }

        check_power_well_state(dev_priv, power_well);
    }

    mutex_unlock(&power_domains->lock);
}

@@ -5386,7 +5633,7 @@ void i915_request_power_well(void)

    dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                            power_domains);
    intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
    intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

@@ -5400,29 +5647,99 @@ void i915_release_power_well(void)

    dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                            power_domains);
    intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
    intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_PIPE_A) | \
    BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
    BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
    BIT(POWER_DOMAIN_PORT_CRT) | \
    BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
    (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
    BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
    HSW_ALWAYS_ON_POWER_DOMAINS | \
    BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS ( \
    (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
    BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
    BIT(POWER_DOMAIN_PORT_CRT) | \
    BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
    BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
    BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
    BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
    BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
    BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
    .sync_hw = i9xx_always_on_power_well_noop,
    .enable = i9xx_always_on_power_well_noop,
    .disable = i9xx_always_on_power_well_noop,
    .is_enabled = i9xx_always_on_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
    {
        .name = "always-on",
        .always_on = 1,
        .domains = POWER_DOMAIN_MASK,
        .ops = &i9xx_always_on_power_well_ops,
    },
};

static const struct i915_power_well_ops hsw_power_well_ops = {
    .sync_hw = hsw_power_well_sync_hw,
    .enable = hsw_power_well_enable,
    .disable = hsw_power_well_disable,
    .is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
    {
        .name = "always-on",
        .always_on = 1,
        .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
        .ops = &i9xx_always_on_power_well_ops,
    },
    {
        .name = "display",
        .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
        .is_enabled = hsw_power_well_enabled,
        .set = hsw_set_power_well,
        .domains = HSW_DISPLAY_POWER_DOMAINS,
        .ops = &hsw_power_well_ops,
    },
};
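The i915_power_well_ops tables above replace the old per-well set/is_enabled members with a vtable, so the HSW/BDW wells, the VLV punit wells, and the always-on dummy all share one generic control flow. The shape of that refactor, reduced to a hedged standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct well;

    struct well_ops {
        void (*enable)(struct well *w);
        void (*disable)(struct well *w);
        bool (*is_enabled)(struct well *w);
    };

    struct well {
        const char *name;
        bool state;
        const struct well_ops *ops;
    };

    static void noop_set(struct well *w)   { (void)w; }
    static bool always_on(struct well *w)  { (void)w; return true; }
    static void hw_enable(struct well *w)  { w->state = true; }
    static void hw_disable(struct well *w) { w->state = false; }
    static bool hw_enabled(struct well *w) { return w->state; }

    static const struct well_ops always_on_ops = { noop_set, noop_set, always_on };
    static const struct well_ops hw_ops = { hw_enable, hw_disable, hw_enabled };

    int main(void)
    {
        struct well wells[] = {
            { "always-on", true, &always_on_ops },
            { "display", false, &hw_ops },
        };

        wells[1].ops->enable(&wells[1]);
        for (int i = 0; i < 2; i++) /* generic code never branches on well type */
            printf("%s: %d\n", wells[i].name,
                   wells[i].ops->is_enabled(&wells[i]));
        return 0;
    }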
@@ -5431,12 +5748,83 @@ static struct i915_power_well bdw_power_wells[] = {
        .name = "always-on",
        .always_on = 1,
        .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
        .ops = &i9xx_always_on_power_well_ops,
    },
    {
        .name = "display",
        .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
        .is_enabled = hsw_power_well_enabled,
        .set = hsw_set_power_well,
        .domains = BDW_DISPLAY_POWER_DOMAINS,
        .ops = &hsw_power_well_ops,
    },
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
    .sync_hw = vlv_power_well_sync_hw,
    .enable = vlv_display_power_well_enable,
    .disable = vlv_display_power_well_disable,
    .is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
    .sync_hw = vlv_power_well_sync_hw,
    .enable = vlv_power_well_enable,
    .disable = vlv_power_well_disable,
    .is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
    {
        .name = "always-on",
        .always_on = 1,
        .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
        .ops = &i9xx_always_on_power_well_ops,
    },
    {
        .name = "display",
        .domains = VLV_DISPLAY_POWER_DOMAINS,
        .data = PUNIT_POWER_WELL_DISP2D,
        .ops = &vlv_display_power_well_ops,
    },
    {
        .name = "dpio-common",
        .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
        .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
        .ops = &vlv_dpio_power_well_ops,
    },
    {
        .name = "dpio-tx-b-01",
        .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
        .ops = &vlv_dpio_power_well_ops,
        .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
    },
    {
        .name = "dpio-tx-b-23",
        .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
        .ops = &vlv_dpio_power_well_ops,
        .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
    },
    {
        .name = "dpio-tx-c-01",
        .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
        .ops = &vlv_dpio_power_well_ops,
        .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
    },
    {
        .name = "dpio-tx-c-23",
        .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
        .ops = &vlv_dpio_power_well_ops,
        .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
    },
};
@@ -5445,9 +5833,8 @@ static struct i915_power_well bdw_power_wells[] = {
    (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})

int intel_power_domains_init(struct drm_device *dev)
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_power_domains *power_domains = &dev_priv->power_domains;

    mutex_init(&power_domains->lock);

@@ -5456,12 +5843,14 @@ int intel_power_domains_init(struct drm_device *dev)
     * The enabling order will be from lower to higher indexed wells,
     * the disabling order is reversed.
     */
    if (IS_HASWELL(dev)) {
    if (IS_HASWELL(dev_priv->dev)) {
        set_power_wells(power_domains, hsw_power_wells);
        hsw_pwr = power_domains;
    } else if (IS_BROADWELL(dev)) {
    } else if (IS_BROADWELL(dev_priv->dev)) {
        set_power_wells(power_domains, bdw_power_wells);
        hsw_pwr = power_domains;
    } else if (IS_VALLEYVIEW(dev_priv->dev)) {
        set_power_wells(power_domains, vlv_power_wells);
    } else {
        set_power_wells(power_domains, i9xx_always_on_power_well);
    }

@@ -5469,47 +5858,28 @@ int intel_power_domains_init(struct drm_device *dev)
    return 0;
}

void intel_power_domains_remove(struct drm_device *dev)
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
    hsw_pwr = NULL;
}

static void intel_power_domains_resume(struct drm_device *dev)
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *power_well;
    int i;

    mutex_lock(&power_domains->lock);
    for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
        if (power_well->set)
            power_well->set(dev, power_well, power_well->count > 0);
    }
    for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
        power_well->ops->sync_hw(dev_priv, power_well);
    mutex_unlock(&power_domains->lock);
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
void intel_power_domains_init_hw(struct drm_device *dev)
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* For now, we need the power well to be always enabled. */
    intel_display_set_init_power(dev, true);
    intel_power_domains_resume(dev);

    if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
        return;

    /* We're taking over the BIOS, so clear any requests made by it since
     * the driver is in charge now. */
    if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
        I915_WRITE(HSW_PWR_WELL_BIOS, 0);
    intel_display_set_init_power(dev_priv, true);
    intel_power_domains_resume(dev_priv);
}

/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */

@@ -5786,10 +6156,9 @@ void intel_pm_setup(struct drm_device *dev)

    mutex_init(&dev_priv->pc8.lock);
    dev_priv->pc8.requirements_met = false;
    dev_priv->pc8.gpu_idle = false;
    dev_priv->pc8.irqs_disabled = false;
    dev_priv->pc8.enabled = false;
    dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
    dev_priv->pc8.disable_count = 1; /* requirements_met */
    INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
    INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                      intel_gen6_powersave_work);
@@ -571,7 +571,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
     * to use MI_WAIT_FOR_EVENT within the CS. It should already be
     * programmed to '1' on all products.
     *
     * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
     * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
     */
    if (INTEL_INFO(dev)->gen >= 6)
        I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

@@ -1388,6 +1388,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
    if (IS_I830(ring->dev) || IS_845G(ring->dev))
        ring->effective_size -= 128;

    i915_cmd_parser_init_ring(ring);

    return 0;

err_unmap:
@@ -164,6 +164,38 @@ struct intel_ring_buffer {
        u32 gtt_offset;
        volatile u32 *cpu_page;
    } scratch;

    /*
     * Tables of commands the command parser needs to know about
     * for this ring.
     */
    const struct drm_i915_cmd_table *cmd_tables;
    int cmd_table_count;

    /*
     * Table of registers allowed in commands that read/write registers.
     */
    const u32 *reg_table;
    int reg_count;

    /*
     * Table of registers allowed in commands that read/write registers, but
     * only from the DRM master.
     */
    const u32 *master_reg_table;
    int master_reg_count;

    /*
     * Returns the bitmask for the length field of the specified command.
     * Return 0 for an unrecognized/invalid command.
     *
     * If the command parser finds an entry for a command in the ring's
     * cmd_tables, it gets the command's length based on the table entry.
     * If not, it calls this function to determine the per-ring length field
     * encoding for the command (i.e. certain opcode ranges use certain bits
     * to encode the command length in the header).
     */
    u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
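The get_cmd_length_mask comment above describes how the parser sizes commands it has no table entry for: the ring supplies a bitmask and the length is read straight out of the command header. A hedged sketch of that decode; the mask value and header are invented, not the real per-ring encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented mask: pretend this ring keeps the length in the low 8 bits. */
    static uint32_t get_cmd_length_mask(uint32_t cmd_header)
    {
        (void)cmd_header;   /* a real ring picks a mask per opcode range */
        return 0xff;
    }

    int main(void)
    {
        uint32_t header = 0x18800005;   /* illustrative command header */
        uint32_t mask = get_cmd_length_mask(header);

        if (!mask)
            fprintf(stderr, "unrecognized command\n");
        else
            printf("command length field: %u\n", header & mask);
        return 0;
    }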
@@ -40,6 +40,12 @@

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
    WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
         "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{

@@ -83,14 +89,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
    __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
    __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
    /* something from same cacheline, but !FORCEWAKE_MT */
    __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                        int fw_engine)
{
    u32 forcewake_ack;

@@ -136,14 +142,16 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
    gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
                                        int fw_engine)
{
    __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                       _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
    /* something from same cacheline, but !FORCEWAKE_MT */
    __raw_posting_read(dev_priv, ECOBUS);
    gen6_gt_check_fifodbg(dev_priv);

    if (IS_GEN7(dev_priv->dev))
        gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)

@@ -251,16 +259,16 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
    unsigned long irqflags;

    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
    if (FORCEWAKE_RENDER & fw_engine) {
        if (dev_priv->uncore.fw_rendercount++ == 0)
            dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                  FORCEWAKE_RENDER);
    }
    if (FORCEWAKE_MEDIA & fw_engine) {
        if (dev_priv->uncore.fw_mediacount++ == 0)
            dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                  FORCEWAKE_MEDIA);
    }

    if (fw_engine & FORCEWAKE_RENDER &&
        dev_priv->uncore.fw_rendercount++ != 0)
        fw_engine &= ~FORCEWAKE_RENDER;
    if (fw_engine & FORCEWAKE_MEDIA &&
        dev_priv->uncore.fw_mediacount++ != 0)
        fw_engine &= ~FORCEWAKE_MEDIA;

    if (fw_engine)
        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -272,46 +280,45 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv,

    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

    if (FORCEWAKE_RENDER & fw_engine) {
        WARN_ON(dev_priv->uncore.fw_rendercount == 0);
        if (--dev_priv->uncore.fw_rendercount == 0)
            dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                  FORCEWAKE_RENDER);
    }
    if (fw_engine & FORCEWAKE_RENDER &&
        --dev_priv->uncore.fw_rendercount != 0)
        fw_engine &= ~FORCEWAKE_RENDER;
    if (fw_engine & FORCEWAKE_MEDIA &&
        --dev_priv->uncore.fw_mediacount != 0)
        fw_engine &= ~FORCEWAKE_MEDIA;

    if (FORCEWAKE_MEDIA & fw_engine) {
        WARN_ON(dev_priv->uncore.fw_mediacount == 0);
        if (--dev_priv->uncore.fw_mediacount == 0)
            dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                  FORCEWAKE_MEDIA);
    }
    if (fw_engine)
        dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_work(struct work_struct *work)
static void gen6_force_wake_timer(unsigned long arg)
{
    struct drm_i915_private *dev_priv =
        container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
    struct drm_i915_private *dev_priv = (void *)arg;
    unsigned long irqflags;

    assert_device_not_suspended(dev_priv);

    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
    if (--dev_priv->uncore.forcewake_count == 0)
        dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

    intel_runtime_pm_put(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (IS_VALLEYVIEW(dev)) {
    if (IS_VALLEYVIEW(dev))
        vlv_force_wake_reset(dev_priv);
    } else if (INTEL_INFO(dev)->gen >= 6) {
    else if (IS_GEN6(dev) || IS_GEN7(dev))
        __gen6_gt_force_wake_reset(dev_priv);
        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
            __gen6_gt_force_wake_mt_reset(dev_priv);
    }

    if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
        __gen7_gt_force_wake_mt_reset(dev_priv);
}

void intel_uncore_early_sanitize(struct drm_device *dev)

@@ -354,7 +361,9 @@ void intel_uncore_sanitize(struct drm_device *dev)
    mutex_lock(&dev_priv->rps.hw_lock);
    reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

    if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
    if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
                   PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
                   PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

    mutex_unlock(&dev_priv->rps.hw_lock);

@@ -393,25 +402,38 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
    unsigned long irqflags;
    bool delayed = false;

    if (!dev_priv->uncore.funcs.force_wake_put)
        return;

    /* Redirect to VLV specific routine */
    if (IS_VALLEYVIEW(dev_priv->dev))
        return vlv_force_wake_put(dev_priv, fw_engine);
    if (IS_VALLEYVIEW(dev_priv->dev)) {
        vlv_force_wake_put(dev_priv, fw_engine);
        goto out;
    }

    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
    if (--dev_priv->uncore.forcewake_count == 0) {
        dev_priv->uncore.forcewake_count++;
        mod_delayed_work(dev_priv->wq,
                         &dev_priv->uncore.force_wake_work,
                         1);
        delayed = true;
        mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
                         jiffies + 1);
    }
    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

    intel_runtime_pm_put(dev_priv);
out:
    if (!delayed)
        intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
    if (!dev_priv->uncore.funcs.force_wake_get)
        return;

    WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
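The work-to-timer conversion above also changes when forcewake is actually released: the last put bumps the count back up and arms a timer for the next tick, so back-to-back register bursts never bounce the hardware. A userspace approximation of that deferred release, with the timer reduced to an explicit callback:

    #include <stdio.h>
    #include <stdbool.h>

    static int forcewake_count;
    static bool timer_pending;

    static void forcewake_put(void)
    {
        if (--forcewake_count == 0) {
            /* Instead of releasing now, hold one extra reference and let
             * a timer drop it shortly; rapid get/put pairs in between
             * then never touch the hardware. */
            forcewake_count++;
            timer_pending = true;
        }
    }

    static void timer_fires(void)
    {
        timer_pending = false;
        if (--forcewake_count == 0)
            printf("forcewake released by timer\n");
    }

    int main(void)
    {
        forcewake_count = 1;    /* caller did a get */
        forcewake_put();        /* deferred: count stays at 1 */
        printf("pending: %d, count: %d\n", (int)timer_pending, forcewake_count);
        timer_fires();          /* the actual release happens here */
        return 0;
    }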
@ -446,16 +468,10 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
assert_device_not_suspended(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
|
||||
"Device suspended\n");
|
||||
}
|
||||
|
||||
#define REG_READ_HEADER(x) \
|
||||
unsigned long irqflags; \
|
||||
u##x val = 0; \
|
||||
assert_device_not_suspended(dev_priv); \
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
|
||||
|
||||
#define REG_READ_FOOTER \
|
||||
|
@ -484,17 +500,15 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
|||
static u##x \
|
||||
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
||||
REG_READ_HEADER(x); \
|
||||
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
||||
if (dev_priv->uncore.forcewake_count == 0) \
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
|
||||
FORCEWAKE_ALL); \
|
||||
val = __raw_i915_read##x(dev_priv, reg); \
|
||||
if (dev_priv->uncore.forcewake_count == 0) \
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
|
||||
FORCEWAKE_ALL); \
|
||||
} else { \
|
||||
val = __raw_i915_read##x(dev_priv, reg); \
|
||||
if (dev_priv->uncore.forcewake_count == 0 && \
|
||||
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
|
||||
FORCEWAKE_ALL); \
|
||||
dev_priv->uncore.forcewake_count++; \
|
||||
mod_timer_pinned(&dev_priv->uncore.force_wake_timer, \
|
||||
jiffies + 1); \
|
||||
} \
|
||||
val = __raw_i915_read##x(dev_priv, reg); \
|
||||
REG_READ_FOOTER; \
|
||||
}
|
||||
|
||||
|
@ -502,27 +516,19 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
|||
static u##x \
|
||||
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
||||
unsigned fwengine = 0; \
|
||||
unsigned *fwcount; \
|
||||
REG_READ_HEADER(x); \
|
||||
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
|
||||
fwengine = FORCEWAKE_RENDER; \
|
||||
fwcount = &dev_priv->uncore.fw_rendercount; \
|
||||
} \
|
||||
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
|
||||
fwengine = FORCEWAKE_MEDIA; \
|
||||
fwcount = &dev_priv->uncore.fw_mediacount; \
|
||||
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
|
||||
if (dev_priv->uncore.fw_rendercount == 0) \
|
||||
fwengine = FORCEWAKE_RENDER; \
|
||||
} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
|
||||
if (dev_priv->uncore.fw_mediacount == 0) \
|
||||
fwengine = FORCEWAKE_MEDIA; \
|
||||
} \
|
||||
if (fwengine != 0) { \
|
||||
if ((*fwcount)++ == 0) \
|
||||
(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
|
||||
fwengine); \
|
||||
val = __raw_i915_read##x(dev_priv, reg); \
|
||||
if (--(*fwcount) == 0) \
|
||||
(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
|
||||
fwengine); \
|
||||
} else { \
|
||||
val = __raw_i915_read##x(dev_priv, reg); \
|
||||
} \
|
||||
if (fwengine) \
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
|
||||
val = __raw_i915_read##x(dev_priv, reg); \
|
||||
if (fwengine) \
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
|
||||
REG_READ_FOOTER; \
|
||||
}
|
||||
|
||||
|
@ -554,6 +560,7 @@ __gen4_read(64)
|
|||
#define REG_WRITE_HEADER \
|
||||
unsigned long irqflags; \
|
||||
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
|
||||
assert_device_not_suspended(dev_priv); \
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
|
||||
|
||||
#define REG_WRITE_FOOTER \
|
||||
|
@ -584,7 +591,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
|
|||
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
||||
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
|
||||
} \
|
||||
assert_device_not_suspended(dev_priv); \
|
||||
__raw_i915_write##x(dev_priv, reg, val); \
|
||||
if (unlikely(__fifo_ret)) { \
|
||||
gen6_gt_check_fifodbg(dev_priv); \
|
||||
|
@@ -600,7 +606,6 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	assert_device_not_suspended(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
@@ -634,16 +639,17 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}
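gen8_write now decides per register whether forcewake is needed at all: shadowed registers (which the hardware latches even while the GT sleeps) and anything at or above 0x40000 are written through, while everything else takes a temporary wake only when the refcount is zero. A sketch of that decision; the shadow table below is a made-up subset, not BDW's real list:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical shadowed registers; the driver keeps the real table. */
static const uint32_t gen8_shadowed[] = { 0x2030, 0x12030, 0x1a030, 0x22030 };

static int is_shadowed(uint32_t reg)
{
	unsigned int i;

	for (i = 0; i < sizeof(gen8_shadowed) / sizeof(gen8_shadowed[0]); i++)
		if (reg == gen8_shadowed[i])
			return 1;
	return 0;
}

/* 1 = wrap the write in a forcewake get/put, 0 = write straight through. */
static int write_needs_forcewake(uint32_t reg)
{
	return reg < 0x40000 && !is_shadowed(reg);
}

int main(void)
{
	printf("0x2030 needs fw? %d (shadowed)\n", write_needs_forcewake(0x2030));
	printf("0x9400 needs fw? %d\n", write_needs_forcewake(0x9400));
	return 0;
}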
@@ -681,15 +687,15 @@ void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);
	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;
@@ -703,16 +709,16 @@ void intel_uncore_init(struct drm_device *dev)
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
@@ -794,10 +800,11 @@ void intel_uncore_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);
	del_timer_sync(&dev_priv->uncore.force_wake_timer);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev);
}
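With the delayed work replaced by a timer, intel_uncore_fini must del_timer_sync() before resetting forcewake state, so no deferred release can fire after teardown. A toy sketch of that ordering, with the timer reduced to a flag:

struct uncore {
	unsigned int forcewake_count;
	int timer_armed;
};

static void uncore_init(struct uncore *u)
{
	u->forcewake_count = 0;
	u->timer_armed = 0;	/* setup_timer(): registered, not pending */
}

static void uncore_fini(struct uncore *u)
{
	/* del_timer_sync(): no deferred release may still be in flight... */
	u->timer_armed = 0;
	/* ...only then force the wells back to a known-off state. */
	u->forcewake_count = 0;
}

int main(void)
{
	struct uncore u;

	uncore_init(&u);
	u.timer_armed = 1;	/* a read left a deferred release pending */
	uncore_fini(&u);	/* kill the timer first, then reset the well */
	return 0;
}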

static const struct register_whitelist {
@@ -947,6 +954,7 @@ static int gen6_do_reset(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;
	u32 fw_engine = 0;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
@@ -966,14 +974,25 @@ static int gen6_do_reset(struct drm_device *dev)

	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	/* If reset with a user forcewake, try to restore */
	if (IS_VALLEYVIEW(dev)) {
		if (dev_priv->uncore.fw_rendercount)
			fw_engine |= FORCEWAKE_RENDER;

		/* Restore fifo count */
		dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		if (dev_priv->uncore.fw_mediacount)
			fw_engine |= FORCEWAKE_MEDIA;
	} else {
		if (dev_priv->uncore.forcewake_count)
			fw_engine = FORCEWAKE_ALL;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	if (IS_GEN6(dev) || IS_GEN7(dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
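Instead of blindly toggling FORCEWAKE_ALL after a reset, the code above rebuilds the wake state from the refcounts: per-well on Valleyview, all-or-nothing elsewhere, with the gen6/gen7 FIFO free-entry count re-read afterwards. A compilable distillation of the mask computation:

#include <stdio.h>

#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA  (1 << 1)
#define FORCEWAKE_ALL    (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)

static unsigned int restore_mask(int is_vlv, unsigned int rendercount,
				 unsigned int mediacount, unsigned int fwcount)
{
	unsigned int fw_engine = 0;

	if (is_vlv) {			/* per-engine wells on Valleyview */
		if (rendercount)
			fw_engine |= FORCEWAKE_RENDER;
		if (mediacount)
			fw_engine |= FORCEWAKE_MEDIA;
	} else if (fwcount) {		/* single well everywhere else */
		fw_engine = FORCEWAKE_ALL;
	}
	return fw_engine;
}

int main(void)
{
	printf("vlv, render held: %#x\n", restore_mask(1, 1, 0, 0));
	printf("hsw, one ref:     %#x\n", restore_mask(0, 0, 0, 2));
	return 0;
}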