Merge tag 'drm-intel-next-2016-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next

- first slice of the gvt device model (Zhenyu et al)
- compression support for gpu error states (Chris)
- sunset clause on gpu errors resulting in dmesg noise telling users
  how to report them
- .rodata diet from Tvrtko
- switch over lots of macros to only take dev_priv (Tvrtko)
- underrun suppression for dp link training (Ville)
- lspcon (hdmi 2.0 on skl/bxt) support from Shashank Sharma, polish
  from Jani
- gen9 wm fixes from Paulo&Lyude
- updated ddi programming for kbl (Rodrigo)
- respect alternate aux/ddc pins (from vbt) for all ddi ports (Ville)

* tag 'drm-intel-next-2016-10-24' of git://anongit.freedesktop.org/drm-intel: (227 commits)
  drm/i915: Update DRIVER_DATE to 20161024
  drm/i915: Stop setting SNB min-freq-table 0 on powersave setup
  drm/i915/dp: add lane_count check in intel_dp_check_link_status
  drm/i915: Fix whitespace issues
  drm/i915: Clean up DDI DDC/AUX CH sanitation
  drm/i915: Respect alternate_ddc_pin for all DDI ports
  drm/i915: Respect alternate_aux_channel for all DDI ports
  drm/i915/gen9: Remove WaEnableYV12BugFixInHalfSliceChicken7
  drm/i915: KBL - Recommended buffer translation programming for DisplayPort
  drm/i915: Move down skl/kbl ddi iboost and n_edp_entires fixup
  drm/i915: Add a sunset clause to GPU hang logging
  drm/i915: Stop reporting error details in dmesg as well as the error-state
  drm/i915/gvt: do not ignore return value of create_scratch_page
  drm/i915/gvt: fix spare warnings on odd constant _Bool cast
  drm/i915/gvt: mark symbols static where possible
  drm/i915/gvt: fix sparse warnings on different address spaces
  drm/i915/gvt: properly access enabled intel_engine_cs
  drm/i915/gvt: Remove defunct vmap_batch()
  drm/i915/gvt: Use common mapping routines for shadow_bb object
  drm/i915/gvt: Use common mapping routines for indirect_ctx object
  ...
Dave Airlie 2016-10-25 16:36:13 +10:00
commit 5481e27f6f
109 changed files with 20208 additions and 2963 deletions


@@ -49,6 +49,15 @@ Intel GVT-g Guest Support(vGPU)
.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
:internal:
Intel GVT-g Host Support(vGPU device model)
-------------------------------------------
.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
:doc: Intel GVT-g host support
.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
:internal:
Display Hardware Handling
=========================


@@ -4064,6 +4064,16 @@ F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
L: igvt-g-dev@lists.01.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org


@@ -148,6 +148,14 @@ static bool is_type2_adaptor(uint8_t adaptor_id)
DP_DUAL_MODE_REV_TYPE2);
}
static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
const uint8_t adaptor_id)
{
return is_hdmi_adaptor(hdmi_id) &&
(adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_TYPE_HAS_DPCD));
}
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
@@ -203,6 +211,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
if (ret == 0) {
if (is_lspcon_adaptor(hdmi_id, adaptor_id))
return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
@@ -364,3 +374,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
/**
* drm_lspcon_get_mode: Get LSPCON's current mode of operation by
* reading offset (0x80, 0x41)
* @adapter: I2C-over-aux adapter
* @mode: current lspcon mode of operation output variable
*
* Returns:
* 0 on success, sets the current_mode value to appropriate mode
* -error on failure
*/
int drm_lspcon_get_mode(struct i2c_adapter *adapter,
enum drm_lspcon_mode *mode)
{
u8 data;
int ret = 0;
if (!mode) {
DRM_ERROR("NULL input\n");
return -EINVAL;
}
/* Read Status: i2c over aux */
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
&data, sizeof(data));
if (ret < 0) {
DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
*mode = DRM_LSPCON_MODE_PCON;
else
*mode = DRM_LSPCON_MODE_LS;
return 0;
}
EXPORT_SYMBOL(drm_lspcon_get_mode);
/**
* drm_lspcon_set_mode: Change LSPCON's mode of operation by
* writing offset (0x80, 0x40)
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
*
* Returns:
* 0 on success, -error on failure/timeout
*/
int drm_lspcon_set_mode(struct i2c_adapter *adapter,
enum drm_lspcon_mode mode)
{
u8 data = 0;
int ret;
int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
data = DP_DUAL_MODE_LSPCON_MODE_PCON;
/* Change mode */
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
&data, sizeof(data));
if (ret < 0) {
DRM_ERROR("LSPCON mode change failed\n");
return ret;
}
/*
* Confirm mode change by reading the status bit.
* Sometimes, it takes a while to change the mode,
* so wait and retry until time out or done.
*/
do {
ret = drm_lspcon_get_mode(adapter, &current_mode);
if (ret) {
DRM_ERROR("can't confirm LSPCON mode change\n");
return ret;
} else {
if (current_mode != mode) {
msleep(10);
time_out -= 10;
} else {
DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
mode == DRM_LSPCON_MODE_LS ?
"LS" : "PCON");
return 0;
}
}
} while (time_out);
DRM_ERROR("LSPCON mode change timed out\n");
return -ETIMEDOUT;
}
EXPORT_SYMBOL(drm_lspcon_set_mode);
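A rough usage sketch for the two new helpers (not part of this commit; the ddc pointer and the wrapper name are illustrative): once drm_dp_dual_mode_detect() reports DRM_DP_DUAL_MODE_LSPCON, a source driver can query the dongle and, if needed, switch it into PCON (HDMI 2.0) mode.

	/* Illustrative wrapper, not in the tree */
	static int lspcon_probe_and_set_pcon(struct i2c_adapter *ddc)
	{
		enum drm_lspcon_mode cur;
		int ret;

		if (drm_dp_dual_mode_detect(ddc) != DRM_DP_DUAL_MODE_LSPCON)
			return -ENODEV;

		ret = drm_lspcon_get_mode(ddc, &cur);	/* reads offset (0x80, 0x41) */
		if (ret)
			return ret;

		if (cur == DRM_LSPCON_MODE_PCON)
			return 0;

		/* drm_lspcon_set_mode() polls until the dongle confirms or times out */
		return drm_lspcon_set_mode(ddc, DRM_LSPCON_MODE_PCON);
	}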


@@ -46,6 +46,31 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
If in doubt, say "N".
config DRM_I915_CAPTURE_ERROR
bool "Enable capturing GPU state following a hang"
depends on DRM_I915
default y
help
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang to
https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
for triaging.
If in doubt, say "Y".
config DRM_I915_COMPRESS_ERROR
bool "Compress GPU error state"
depends on DRM_I915_CAPTURE_ERROR
select ZLIB_DEFLATE
default y
help
This option selects ZLIB_DEFLATE if it isn't already
selected and causes any error state captured upon a GPU hang
to be compressed using zlib.
If in doubt, say "Y".
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915


@@ -42,7 +42,6 @@ i915-y += i915_cmd_parser.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gpu_error.o \
i915_trace_points.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
@@ -102,11 +101,15 @@ i915-y += dvo_ch7017.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
# virtual gpu code
i915-y += i915_vgpu.o


@@ -1,5 +1,7 @@
GVT_DIR := gvt
GVT_SOURCE := gvt.o
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))


@@ -0,0 +1,352 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Pei Zhang <pei.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 alloc_flag, search_flag;
u64 start, end, size;
struct drm_mm_node *node;
int retried = 0;
int ret;
if (high_gm) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
node = &vgpu->gm.high_gm_node;
size = vgpu_hidden_sz(vgpu);
start = gvt_hidden_gmadr_base(gvt);
end = gvt_hidden_gmadr_end(gvt);
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
node = &vgpu->gm.low_gm_node;
size = vgpu_aperture_sz(vgpu);
start = gvt_aperture_gmadr_base(gvt);
end = gvt_aperture_gmadr_end(gvt);
}
mutex_lock(&dev_priv->drm.struct_mutex);
search_again:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
node, size, 4096, 0,
start, end, search_flag,
alloc_flag);
if (ret) {
ret = i915_gem_evict_something(&dev_priv->ggtt.base,
size, 4096, 0, start, end, 0);
if (ret == 0 && ++retried < 3)
goto search_again;
gvt_err("fail to alloc %s gm space from host, retried %d\n",
high_gm ? "high" : "low", retried);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
ret = alloc_gm(vgpu, false);
if (ret)
return ret;
ret = alloc_gm(vgpu, true);
if (ret)
goto out_free_aperture;
gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
return 0;
out_free_aperture:
mutex_lock(&dev_priv->drm.struct_mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
mutex_lock(&dev_priv->drm.struct_mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
/**
* intel_vgpu_write_fence - write fence registers owned by a vGPU
* @vgpu: vGPU instance
* @fence: vGPU fence register number
* @value: Fence register value to be written
*
* This function is used to write fence registers owned by a vGPU. The vGPU
* fence register number will be translated into HW fence register number.
*
*/
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
assert_rpm_wakelock_held(dev_priv);
if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
if (WARN_ON(!reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
I915_WRITE(fence_reg_lo, 0);
POSTING_READ(fence_reg_lo);
I915_WRITE(fence_reg_hi, upper_32_bits(value));
I915_WRITE(fence_reg_lo, lower_32_bits(value));
POSTING_READ(fence_reg_lo);
}
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
return;
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
struct list_head *pos, *q;
intel_runtime_pm_get(dev_priv);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
i = 0;
list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
reg = list_entry(pos, struct drm_i915_fence_reg, link);
if (reg->pin_count || reg->vma)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
out_free_fence:
/* Return fences to host, if fail */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return -ENOSPC;
}
static void free_resource(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
}
static int alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
gvt_err("Invalid vGPU creation params\n");
return -EINVAL;
}
item = "low GM space";
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
request = MB_TO_BYTES(param->low_gm_sz);
if (request > avail)
goto no_enough_resource;
vgpu_aperture_sz(vgpu) = request;
item = "high GM space";
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
request = MB_TO_BYTES(param->high_gm_sz);
if (request > avail)
goto no_enough_resource;
vgpu_hidden_sz(vgpu) = request;
item = "fence";
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
request = param->fence_sz;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
return 0;
no_enough_resource:
gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
/**
* intel_vgpu_free_resource - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
*
*/
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
free_vgpu_gm(vgpu);
free_vgpu_fence(vgpu);
free_resource(vgpu);
}
/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
* This function is used to allocate HW resource for a vGPU. User specifies
* the resource configuration through the creation params.
*
* Returns:
* zero on success, negative error code if failed.
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
int ret;
ret = alloc_resource(vgpu, param);
if (ret)
return ret;
ret = alloc_vgpu_gm(vgpu);
if (ret)
goto out_free_resource;
ret = alloc_vgpu_fence(vgpu);
if (ret)
goto out_free_vgpu_gm;
return 0;
out_free_vgpu_gm:
free_vgpu_gm(vgpu);
out_free_resource:
free_resource(vgpu);
return ret;
}
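For illustration only (not part of the commit): a vGPU creation path fills an intel_vgpu_creation_params with the GM sizes in MB and a fence count, then calls the allocator above. The numbers below are made up.

	struct intel_vgpu_creation_params param = {
		.low_gm_sz  = 64,	/* MB of low (mappable) GM */
		.high_gm_sz = 448,	/* MB of high (hidden) GM */
		.fence_sz   = 4,	/* fence registers */
	};
	int ret;

	ret = intel_vgpu_alloc_resource(vgpu, &param);
	if (ret)	/* e.g. -EINVAL for zero sizes, -ENOSPC when a pool is exhausted */
		return ret;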


@@ -0,0 +1,288 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
enum {
INTEL_GVT_PCI_BAR_GTTMMIO = 0,
INTEL_GVT_PCI_BAR_APERTURE,
INTEL_GVT_PCI_BAR_PIO,
INTEL_GVT_PCI_BAR_MAX,
};
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
return 0;
}
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
u64 first_gfn, first_mfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
else
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
vgpu_aperture_sz(vgpu)
>> PAGE_SHIFT, map,
GVT_MAP_APERTURE);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
}
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
u64 start, end;
u64 val;
int ret;
if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
else
start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
start &= ~GENMASK(3, 0);
end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
return 0;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
int ret;
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
ret = trap_gttmmio(vgpu, false);
if (ret)
return ret;
ret = map_aperture(vgpu, false);
if (ret)
return ret;
} else {
ret = trap_gttmmio(vgpu, true);
if (ret)
return ret;
ret = map_aperture(vgpu, true);
if (ret)
return ret;
}
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int bar_index =
(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
return -EINVAL;
if (new == 0xffffffff) {
/*
* Power-up software can determine how much address
* space the device requires by writing a value of
* all 1's to the register and then reading the value
* back. The device will return 0's in all don't-care
* address bits.
*/
size = vgpu->cfg_space.bar[bar_index].size;
if (lo) {
new = rounddown(new, size);
} else {
u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
/* for 32bit mode bar it returns all-0 in upper 32
* bit, for 64bit mode bar it will calculate the
* size with lower 32bit and return the corresponding
* value
*/
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
new &= (~(size-1)) >> 32;
else
new = 0;
}
/*
* Unmap & untrap the BAR, since guest hasn't configured a
* valid GPA
*/
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, false);
break;
}
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
} else {
/*
* Unmap & untrap the old BAR first, since guest has
* re-configured the BAR
*/
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, false);
break;
}
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
/* Track the new BAR */
if (mmio_enabled) {
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, true);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, true);
break;
}
}
}
return ret;
}
/**
* intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
int ret;
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
if (WARN_ON(bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_BASE_ADDRESS_0:
case PCI_BASE_ADDRESS_1:
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
return ret;
break;
case INTEL_GVT_PCI_OPREGION:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
if (ret)
return ret;
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
break;
default:
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
break;
}
return 0;
}
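A worked example of the BAR size probe handled in emulate_pci_bar_write() above, purely illustrative and with a made-up 16 MiB BAR: when the guest writes all 1s to the low dword, the don't-care address bits must read back as 0, which rounddown() produces for a power-of-two size.

	u64 size = 16 * 1024 * 1024;	/* assume bar[INTEL_GVT_PCI_BAR_GTTMMIO].size == 16 MiB */
	u32 new  = 0xffffffff;		/* guest's size-probe write */

	new = rounddown(new, size);	/* 0xffffffff & ~(size - 1) == 0xff000000 */
	/* the guest reads back 0xff000000 and infers a 16 MiB, 32-bit BAR */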

File diff suppressed because it is too large.


@@ -0,0 +1,49 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_CMD_PARSER_H_
#define _GVT_CMD_PARSER_H_
#define GVT_CMD_HASH_BITS 7
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif


@@ -24,11 +24,34 @@
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
/*
* Other GVT debug stuff will be introduced in the GVT device model patches.
*/
#define gvt_dbg_irq(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
#define gvt_dbg_mmio(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
#define gvt_dbg_dpy(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
#define gvt_dbg_el(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
#define gvt_dbg_render(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
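The new per-category macros are plain printf-style wrappers; each one just prepends its fixed prefix to the format string before handing it to DRM_DEBUG_DRIVER. An illustrative call (the ring_id variable is only an example):

	gvt_dbg_sched("ring id %d stop\n", ring_id);
	/* expands to DRM_DEBUG_DRIVER("gvt: sched: " "ring id %d stop\n", ring_id) */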


@@ -0,0 +1,330 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
int pipe = -1;
switch (data & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
}
return pipe;
}
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
return 0;
return 1;
}
static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
get_edp_pipe(vgpu) == pipe)
return 1;
return 0;
}
/* EDID with 1024x768 as its resolution */
static unsigned char virtual_dp_monitor_edid[] = {
/*Header*/
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
/* Vendor & Product Identification */
0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
/* Version & Revision */
0x01, 0x04,
/* Basic Display Parameters & Features */
0xa5, 0x34, 0x20, 0x78, 0x23,
/* Color Characteristics */
0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
/* Established Timings: maximum resolution is 1024x768 */
0x21, 0x08, 0x00,
/* Standard Timings. All invalid */
0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
/* 18 Byte Data Blocks 1: invalid */
0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
/* 18 Byte Data Blocks 2: invalid */
0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
/* 18 Byte Data Blocks 3: invalid */
0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
/* 18 Byte Data Blocks 4: invalid */
0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
/* Extension Block Count */
0x00,
/* Checksum */
0xef,
};
#define DPCD_HEADER_SIZE 0xb
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv))
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (IS_BROADWELL(dev_priv))
vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
}
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
kfree(port->edid);
port->edid = NULL;
kfree(port->dpcd);
port->dpcd = NULL;
}
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
if (!port->edid)
return -ENOMEM;
port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
if (!port->dpcd) {
kfree(port->edid);
return -ENOMEM;
}
memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
EDID_SIZE);
port->edid->data_valid = true;
memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
port->dpcd->data_valid = true;
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
emulate_monitor_status_change(vgpu);
return 0;
}
/**
* intel_gvt_check_vblank_emulation - check if vblank emulation timer should
* be turned on/off when a virtual pipe is enabled/disabled.
* @gvt: a GVT device
*
* This function is used to turn on/off vblank timer according to currently
* enabled/disabled virtual pipes.
*
*/
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
hrtimer_cancel(&irq->vblank_timer.timer);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
have_enabled_pipe =
pipe_is_enabled(vgpu, pipe);
if (have_enabled_pipe)
break;
}
}
if (have_enabled_pipe)
hrtimer_start(&irq->vblank_timer.timer,
ktime_add_ns(ktime_get(), irq->vblank_timer.period),
HRTIMER_MODE_ABS);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
[PIPE_B] = PIPE_B_VBLANK,
[PIPE_C] = PIPE_C_VBLANK,
};
int event;
if (pipe < PIPE_A || pipe > PIPE_C)
return;
for_each_set_bit(event, irq->flip_done_event[pipe],
INTEL_GVT_EVENT_MAX) {
clear_bit(event, irq->flip_done_event[pipe]);
if (!pipe_is_enabled(vgpu, pipe))
continue;
vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
if (pipe_is_enabled(vgpu, pipe)) {
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
}
/**
* intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
* @gvt: a GVT device
*
* This function is used to trigger vblank interrupts for vGPUs on GVT device
*
*/
void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
}
/**
* intel_vgpu_clean_display - clean vGPU virtual display emulation
* @vgpu: a vGPU
*
* This function is used to clean vGPU virtual display emulation stuffs
*
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (IS_SKYLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
}
/**
* intel_vgpu_init_display- initialize vGPU virtual display emulation
* @vgpu: a vGPU
*
* This function is used to initialize vGPU virtual display emulation stuffs
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
else
return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
}
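Both vblank helpers above check that gvt->lock is held, so a periodic caller would look roughly like the sketch below; the surrounding service work item is hypothetical and not part of this file.

	/* illustrative service work scheduled by the vblank timer */
	mutex_lock(&gvt->lock);
	intel_gvt_emulate_vblank(gvt);		/* inject flip-done/vblank events per enabled pipe */
	intel_gvt_check_vblank_emulation(gvt);	/* keep the timer armed only while a pipe is enabled */
	mutex_unlock(&gvt->lock);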


@@ -0,0 +1,163 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_DISPLAY_H_
#define _GVT_DISPLAY_H_
#define SBI_REG_MAX 20
#define DPCD_SIZE 0x700
#define intel_vgpu_port(vgpu, port) \
(&(vgpu->display.ports[port]))
#define intel_vgpu_has_monitor_on_port(vgpu, port) \
(intel_vgpu_port(vgpu, port)->edid && \
intel_vgpu_port(vgpu, port)->edid->data_valid)
#define intel_vgpu_port_is_dp(vgpu, port) \
((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
#define INTEL_GVT_MAX_UEVENT_VARS 3
/* DPCD start */
#define DPCD_SIZE 0x700
/* DPCD */
#define DP_SET_POWER 0x600
#define DP_SET_POWER_D0 0x1
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
#define AUX_BURST_SIZE 16
/* DPCD addresses */
#define DPCD_REV 0x000
#define DPCD_MAX_LINK_RATE 0x001
#define DPCD_MAX_LANE_COUNT 0x002
#define DPCD_TRAINING_PATTERN_SET 0x102
#define DPCD_SINK_COUNT 0x200
#define DPCD_LANE0_1_STATUS 0x202
#define DPCD_LANE2_3_STATUS 0x203
#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
#define DPCD_SINK_STATUS 0x205
/* link training */
#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
#define DPCD_LINK_TRAINING_DISABLED 0x00
#define DPCD_TRAINING_PATTERN_1 0x01
#define DPCD_TRAINING_PATTERN_2 0x02
#define DPCD_CP_READY_MASK (1 << 6)
/* lane status */
#define DPCD_LANES_CR_DONE 0x11
#define DPCD_LANES_EQ_DONE 0x22
#define DPCD_SYMBOL_LOCKED 0x44
#define DPCD_INTERLANE_ALIGN_DONE 0x01
#define DPCD_SINK_IN_SYNC 0x03
/* DPCD end */
#define SBI_RESPONSE_MASK 0x3
#define SBI_RESPONSE_SHIFT 0x1
#define SBI_STAT_MASK 0x1
#define SBI_STAT_SHIFT 0x0
#define SBI_OPCODE_SHIFT 8
#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
#define SBI_CMD_IORD 2
#define SBI_CMD_IOWR 3
#define SBI_CMD_CRRD 6
#define SBI_CMD_CRWR 7
#define SBI_ADDR_OFFSET_SHIFT 16
#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
struct intel_vgpu_sbi_register {
unsigned int offset;
u32 value;
};
struct intel_vgpu_sbi {
int number;
struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
};
enum intel_gvt_plane_type {
PRIMARY_PLANE = 0,
CURSOR_PLANE,
SPRITE_PLANE,
MAX_PLANE
};
struct intel_vgpu_dpcd_data {
bool data_valid;
u8 data[DPCD_SIZE];
};
enum intel_vgpu_port_type {
GVT_CRT = 0,
GVT_DP_A,
GVT_DP_B,
GVT_DP_C,
GVT_DP_D,
GVT_HDMI_B,
GVT_HDMI_C,
GVT_HDMI_D,
GVT_PORT_MAX
};
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
/* per display DPCD information */
struct intel_vgpu_dpcd_data *dpcd;
int type;
};
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
int intel_vgpu_init_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
#endif


@@ -0,0 +1,532 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
#define gmbus1_total_byte_count(v) (((v) >> \
GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
/* GMBUS0 bits definitions */
#define _GMBUS_PIN_SEL_MASK (0x7)
static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
{
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
gvt_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
if (edid->current_edid_read >= EDID_SIZE) {
gvt_err("edid_get_byte() exceeds the size of EDID!\n");
return 0;
}
if (!edid->edid_available) {
gvt_err("Reading EDID but EDID is not available!\n");
return 0;
}
if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
struct intel_vgpu_edid_data *edid_data =
intel_vgpu_port(vgpu, edid->port)->edid;
chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++;
} else {
gvt_err("No EDID available during the reading?\n");
}
return chr;
}
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == 2)
port = PORT_E;
else if (port_select == 4)
port = PORT_C;
else if (port_select == 5)
port = PORT_B;
else if (port_select == 6)
port = PORT_D;
return port;
}
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
if (!vgpu->display.i2c_edid.edid_available)
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
/* GMBUS0 */
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
intel_vgpu_init_i2c_edid(vgpu);
if (pin_select == 0)
return 0;
port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
!intel_vgpu_port_is_dp(vgpu, port)) {
vgpu->display.i2c_edid.port = port;
vgpu->display.i2c_edid.edid_available = true;
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
} else
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
return 0;
}
static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
u32 slave_addr;
u32 wvalue = *(u32 *)p_data;
if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
if (!(wvalue & GMBUS_SW_CLR_INT)) {
vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
reset_gmbus_controller(vgpu);
}
/*
* TODO: "This bit is cleared to zero when an event
* causes the HW_RDY bit transition to occur "
*/
} else {
/*
* per bspec setting this bit can cause:
* 1) INT status bit cleared
* 2) HW_RDY bit asserted
*/
if (wvalue & GMBUS_SW_CLR_INT) {
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
}
/* For virtualization, we suppose that HW is always ready,
* so GMBUS_SW_RDY should always be cleared
*/
if (wvalue & GMBUS_SW_RDY)
wvalue &= ~GMBUS_SW_RDY;
i2c_edid->gmbus.total_byte_count =
gmbus1_total_byte_count(wvalue);
slave_addr = gmbus1_slave_addr(wvalue);
/* vgpu gmbus only support EDID */
if (slave_addr == EDID_ADDR) {
i2c_edid->slave_selected = true;
} else if (slave_addr != 0) {
gvt_dbg_dpy(
"vgpu%d: unsupported gmbus slave addr(0x%x)\n"
" gmbus operations will be ignored.\n",
vgpu->id, slave_addr);
}
if (wvalue & GMBUS_CYCLE_INDEX)
i2c_edid->current_edid_read =
gmbus1_slave_index(wvalue);
i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
switch (gmbus1_bus_cycle(wvalue)) {
case GMBUS_NOCYCLE:
break;
case GMBUS_STOP:
/* From spec:
* This can only cause a STOP to be generated
* if a GMBUS cycle is generated, the GMBUS is
* currently in a data/wait/idle phase, or it is in a
* WAIT phase
*/
if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
!= GMBUS_NOCYCLE) {
intel_vgpu_init_i2c_edid(vgpu);
/* After the 'stop' cycle, hw state would become
* 'stop phase' and then 'idle phase' after a
* few milliseconds. In emulation, we just set
* it as 'idle phase' ('stop phase' is not
* visible in gmbus interface)
*/
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
}
break;
case NIDX_NS_W:
case IDX_NS_W:
case NIDX_STOP:
case IDX_STOP:
/* From hw spec the GMBUS phase
* transition like this:
* START (-->INDEX) -->DATA
*/
i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
gvt_err("Unknown/reserved GMBUS cycle detected!\n");
break;
}
/*
* From hw spec the WAIT state will be
* cleared:
* (1) in a new GMBUS cycle
* (2) by generating a stop
*/
vgpu_vreg(vgpu, offset) = wvalue;
}
return 0;
}
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
WARN_ON(1);
return 0;
}
static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
int i;
unsigned char byte_data;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int byte_left = i2c_edid->gmbus.total_byte_count -
i2c_edid->current_edid_read;
int byte_count = byte_left;
u32 reg_data = 0;
/* Data can only be received if the previous settings are correct */
if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
}
if (byte_count > 4)
byte_count = 4;
for (i = 0; i < byte_count; i++) {
byte_data = edid_get_byte(vgpu);
reg_data |= (byte_data << (i << 3));
}
memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
if (byte_left <= 4) {
switch (i2c_edid->gmbus.cycle_type) {
case NIDX_STOP:
case IDX_STOP:
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
break;
case NIDX_NS_W:
case IDX_NS_W:
default:
i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
break;
}
intel_vgpu_init_i2c_edid(vgpu);
}
/*
* Read GMBUS3 during send operation,
* return the latest written value
*/
} else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
vgpu->id);
}
return 0;
}
static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = vgpu_vreg(vgpu, offset);
if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
memcpy(p_data, (void *)&value, bytes);
return 0;
}
static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 wvalue = *(u32 *)p_data;
if (wvalue & GMBUS_INUSE)
vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
/* All other bits are read-only */
return 0;
}
/**
* intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
* @vgpu: a vGPU
*
* This function is used to emulate gmbus register mmio read
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
}
/**
* intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
* @vgpu: a vGPU
*
* This function is used to emulate gmbus register mmio write
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
return 0;
}
enum {
AUX_CH_CTL = 0,
AUX_CH_DATA1,
AUX_CH_DATA2,
AUX_CH_DATA3,
AUX_CH_DATA4,
AUX_CH_DATA5
};
static inline int get_aux_ch_reg(unsigned int offset)
{
int reg;
switch (offset & 0xff) {
case 0x10:
reg = AUX_CH_CTL;
break;
case 0x14:
reg = AUX_CH_DATA1;
break;
case 0x18:
reg = AUX_CH_DATA2;
break;
case 0x1c:
reg = AUX_CH_DATA3;
break;
case 0x20:
reg = AUX_CH_DATA4;
break;
case 0x24:
reg = AUX_CH_DATA5;
break;
default:
reg = -1;
break;
}
return reg;
}
#define AUX_CTL_MSG_LENGTH(reg) \
((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
/**
* intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
* @vgpu: a vGPU
*
* This function is used to emulate AUX channel register write
*
*/
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
int port_idx,
unsigned int offset,
void *p_data)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
u32 value = *(u32 *)p_data;
int aux_data_for_write = 0;
int reg = get_aux_ch_reg(offset);
if (reg != AUX_CH_CTL) {
vgpu_vreg(vgpu, offset) = value;
return;
}
msg_length = AUX_CTL_MSG_LENGTH(value);
// check the msg in DATA register.
msg = vgpu_vreg(vgpu, offset + 4);
addr = (msg >> 8) & 0xffff;
ctrl = (msg >> 24) & 0xff;
op = ctrl >> 4;
if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
/* The ctl write to clear some states */
return;
}
/* Always set the wanted value for vms. */
ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
vgpu_vreg(vgpu, offset) =
DP_AUX_CH_CTL_DONE |
((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
if (msg_length == 3) {
if (!(op & GVT_AUX_I2C_MOT)) {
/* stop */
intel_vgpu_init_i2c_edid(vgpu);
} else {
/* start or restart */
i2c_edid->aux_ch.i2c_over_aux_ch = true;
i2c_edid->aux_ch.aux_ch_mot = true;
if (addr == 0) {
/* reset the address */
intel_vgpu_init_i2c_edid(vgpu);
} else if (addr == EDID_ADDR) {
i2c_edid->state = I2C_AUX_CH;
i2c_edid->port = port_idx;
i2c_edid->slave_selected = true;
if (intel_vgpu_has_monitor_on_port(vgpu,
port_idx) &&
intel_vgpu_port_is_dp(vgpu, port_idx))
i2c_edid->edid_available = true;
}
}
} else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
/* TODO
* We only support EDID reading from I2C_over_AUX. And
* we do not expect the index mode to be used. Right now
* the WRITE operation is ignored. It is good enough to
* support the gfx driver to do EDID access.
*/
} else {
if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
return;
if (WARN_ON(msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->slave_selected) {
unsigned char val = edid_get_byte(vgpu);
aux_data_for_write = (val << 16);
}
}
/* write the return value in AUX_CH_DATA reg which includes:
* ACK of I2C_WRITE
* returned byte if it is READ
*/
aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
/**
* intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
* @vgpu: a vGPU
*
* This function is used to initialize vGPU i2c edid emulation stuffs
*
*/
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
{
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
edid->state = I2C_NOT_SPECIFIED;
edid->port = -1;
edid->slave_selected = false;
edid->edid_available = false;
edid->current_edid_read = 0;
memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
edid->aux_ch.i2c_over_aux_ch = false;
edid->aux_ch.aux_ch_mot = false;
}
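To make the GMBUS1 bit-field helpers at the top of this file concrete, here is an illustrative decode of one made-up write value: a 128-byte read (one EDID block) from slave 0x50 using a no-index, no-stop, wait cycle.

	u32 wvalue = (NIDX_NS_W << 25) |	/* bus cycle */
		     (128 << 16)       |	/* total byte count */
		     (EDID_ADDR << 1)  |	/* 7-bit slave address 0x50 */
		     GMBUS_SLAVE_READ;		/* direction: read */

	/* gmbus1_bus_cycle(wvalue)        == NIDX_NS_W
	 * gmbus1_total_byte_count(wvalue) == 128
	 * gmbus1_slave_addr(wvalue)       == 0x50 (EDID_ADDR)
	 */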


@@ -0,0 +1,150 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
#define GVT_AUX_NATIVE_WRITE 0x8
#define GVT_AUX_NATIVE_READ 0x9
#define GVT_AUX_I2C_WRITE 0x0
#define GVT_AUX_I2C_READ 0x1
#define GVT_AUX_I2C_STATUS 0x2
#define GVT_AUX_I2C_MOT 0x4
#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
struct intel_vgpu_edid_data {
bool data_valid;
unsigned char edid_block[EDID_SIZE];
};
enum gmbus_cycle_type {
GMBUS_NOCYCLE = 0x0,
NIDX_NS_W = 0x1,
IDX_NS_W = 0x3,
GMBUS_STOP = 0x4,
NIDX_STOP = 0x5,
IDX_STOP = 0x7
};
/*
* States of GMBUS
*
* GMBUS0-3 could be related to the EDID virtualization. Another two GMBUS
* registers, GMBUS4 (interrupt mask) and GMBUS5 (2 byte index register), are
* not considered here. Below describes the usage of GMBUS registers that are
* cared by the EDID virtualization
*
* GMBUS0:
* R/W
* port selection. value of bit0 - bit2 corresponds to the GPIO registers.
*
* GMBUS1:
* R/W Protect
* Command and Status.
* bit0 is the direction bit: 1 is read; 0 is write.
* bit1 - bit7 is slave 7-bit address.
* bit16 - bit24 total byte count (ignore?)
*
* GMBUS2:
* Most of bits are read only except bit 15 (IN_USE)
* Status register
* bit0 - bit8 current byte count
* bit 11: hardware ready;
*
* GMBUS3:
* Read/Write
* Data for transfer
*/
/* From hw specs, Other phases like START, ADDRESS, INDEX
* are invisible to GMBUS MMIO interface. So no definitions
* in below enum types
*/
enum gvt_gmbus_phase {
GMBUS_IDLE_PHASE = 0,
GMBUS_DATA_PHASE,
GMBUS_WAIT_PHASE,
//GMBUS_STOP_PHASE,
GMBUS_MAX_PHASE
};
struct intel_vgpu_i2c_gmbus {
unsigned int total_byte_count; /* from GMBUS1 */
enum gmbus_cycle_type cycle_type;
enum gvt_gmbus_phase phase;
};
struct intel_vgpu_i2c_aux_ch {
bool i2c_over_aux_ch;
bool aux_ch_mot;
};
enum i2c_state {
I2C_NOT_SPECIFIED = 0,
I2C_GMBUS = 1,
I2C_AUX_CH = 2
};
/* I2C sequences cannot interleave.
* GMBUS and AUX_CH sequences cannot interleave.
*/
struct intel_vgpu_i2c_edid {
enum i2c_state state;
unsigned int port;
bool slave_selected;
bool edid_available;
unsigned int current_edid_read;
struct intel_vgpu_i2c_gmbus gmbus;
struct intel_vgpu_i2c_aux_ch aux_ch;
};
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes);
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes);
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
int port_idx,
unsigned int offset,
void *p_data);
#endif /*_GVT_EDID_H_*/

View File

@ -0,0 +1,860 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
#define _EL_OFFSET_STATUS 0x234
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
#define execlist_ring_mmio(gvt, ring_id, offset) \
(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
[VCS] = VCS_AS_CONTEXT_SWITCH,
[VCS2] = VCS2_AS_CONTEXT_SWITCH,
[VECS] = VECS_AS_CONTEXT_SWITCH,
};
static int ring_id_to_context_switch_event(int ring_id)
{
if (WARN_ON(ring_id < RCS || ring_id >=
ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
execlist->running_slot ?
execlist->running_slot->index : -1,
execlist->running_context ?
execlist->running_context->context_id : 0,
execlist->pending_slot ?
execlist->pending_slot->index : -1);
execlist->running_slot = execlist->pending_slot;
execlist->pending_slot = NULL;
execlist->running_context = execlist->running_context ?
&execlist->running_slot->ctx[0] : NULL;
gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
execlist->running_slot ?
execlist->running_slot->index : -1,
execlist->running_context ?
execlist->running_context->context_id : 0,
execlist->pending_slot ?
execlist->pending_slot->index : -1);
}
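/*
 * Mirror the current virtual execlist state (running/pending slot and the
 * running context) into the vGPU's EXECLIST_STATUS register.
 */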
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
int ring_id = execlist->ring_id;
u32 status_reg = execlist_ring_mmio(vgpu->gvt,
ring_id, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (running) {
status.current_execlist_pointer = !!running->index;
status.execlist_write_pointer = !!!running->index;
status.execlist_0_active = status.execlist_0_valid =
!!!(running->index);
status.execlist_1_active = status.execlist_1_valid =
!!(running->index);
} else {
status.context_id = 0;
status.execlist_0_active = status.execlist_0_valid = 0;
status.execlist_1_active = status.execlist_1_valid = 0;
}
status.context_id = desc ? desc->context_id : 0;
status.execlist_queue_full = !!(pending);
vgpu_vreg(vgpu, status_reg) = status.ldw;
vgpu_vreg(vgpu, status_reg + 4) = status.udw;
gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
vgpu->id, status_reg, status.ldw, status.udw);
}
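/*
 * Append one context status event to the vGPU's context status buffer:
 * advance the CSB write pointer (0x7 means the buffer has never been
 * written; otherwise it wraps over the six entries), store the event,
 * update the status pointer register and, unless the caller defers it,
 * fire the ring's context-switch interrupt.
 */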
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
struct execlist_context_status_format *status,
bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
write_pointer = ctx_status_ptr.write_ptr;
if (write_pointer == 0x7)
write_pointer = 0;
else {
++write_pointer;
write_pointer %= 0x6;
}
offset = ctx_status_buf_reg + write_pointer * 8;
vgpu_vreg(vgpu, offset) = status->ldw;
vgpu_vreg(vgpu, offset + 4) = status->udw;
ctx_status_ptr.write_ptr = write_pointer;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
ring_id_to_context_switch_event(execlist->ring_id));
}
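/*
 * Emulate a guest context leaving the hardware: either an element switch
 * (the second context in the running slot takes over) or a context
 * complete / active-to-idle (the slot is drained and the pending slot, if
 * any, is promoted), updating the status register and CSB accordingly.
 */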
static int emulate_execlist_ctx_schedule_out(
struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx)
{
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
struct execlist_context_status_format status;
memset(&status, 0, sizeof(status));
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
gvt_err("schedule out context is not running context,"
"ctx id %x running ctx id %x\n",
ctx->context_id,
execlist->running_context->context_id);
return -EINVAL;
}
/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
if (valid_context(ctx1) && same_context(ctx0, ctx)) {
gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
execlist->running_context = ctx1;
emulate_execlist_status(execlist);
status.context_complete = status.element_switch = 1;
status.context_id = ctx->context_id;
emulate_csb_update(execlist, &status, false);
/*
* ctx1 is not valid, ctx == ctx0
* ctx1 is valid, ctx1 == ctx
* --> last element is finished
* emulate:
* active-to-idle if there is *no* pending execlist
* context-complete if there *is* pending execlist
*/
} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
gvt_dbg_el("need to switch virtual execlist slot\n");
switch_virtual_execlist_slot(execlist);
emulate_execlist_status(execlist);
status.context_complete = status.active_to_idle = 1;
status.context_id = ctx->context_id;
if (!pending) {
emulate_csb_update(execlist, &status, false);
} else {
emulate_csb_update(execlist, &status, true);
memset(&status, 0, sizeof(status));
status.idle_to_active = 1;
status.context_id = 0;
emulate_csb_update(execlist, &status, false);
}
} else {
WARN_ON(1);
return -EINVAL;
}
return 0;
}
static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
int ring_id = execlist->ring_id;
u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (status.execlist_queue_full) {
gvt_err("virtual execlist slots are full\n");
return NULL;
}
return &execlist->slot[status.execlist_write_pointer];
}
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format ctx[2])
{
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *slot =
get_next_execlist_slot(execlist);
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
struct execlist_context_status_format status;
gvt_dbg_el("emulate schedule-in\n");
if (!slot) {
gvt_err("no available execlist slot\n");
return -EINVAL;
}
memset(&status, 0, sizeof(status));
memset(slot->ctx, 0, sizeof(slot->ctx));
slot->ctx[0] = ctx[0];
slot->ctx[1] = ctx[1];
gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
slot->index, ctx[0].context_id,
ctx[1].context_id);
/*
* no running execlist, treat this submission as the running execlist
* -> idle-to-active
*/
if (!running) {
gvt_dbg_el("no current running execlist\n");
execlist->running_slot = slot;
execlist->pending_slot = NULL;
execlist->running_context = &slot->ctx[0];
gvt_dbg_el("running slot index %d running context %x\n",
execlist->running_slot->index,
execlist->running_context->context_id);
emulate_execlist_status(execlist);
status.idle_to_active = 1;
status.context_id = 0;
emulate_csb_update(execlist, &status, false);
return 0;
}
ctx0 = &running->ctx[0];
ctx1 = &running->ctx[1];
gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
running->index, ctx0->context_id, ctx1->context_id);
/*
* already has a running execlist
* a. running ctx1 is valid,
* ctx0 is finished, and running ctx1 == new execlist ctx[0]
* b. running ctx1 is not valid,
* ctx0 == new execlist ctx[0]
* ----> lite-restore + preempted
*/
if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
/* condition a */
(!same_context(ctx0, execlist->running_context))) ||
(!valid_context(ctx1) &&
same_context(ctx0, &slot->ctx[0]))) { /* condition b */
gvt_dbg_el("need to switch virtual execlist slot\n");
execlist->pending_slot = slot;
switch_virtual_execlist_slot(execlist);
emulate_execlist_status(execlist);
status.lite_restore = status.preempted = 1;
status.context_id = ctx[0].context_id;
emulate_csb_update(execlist, &status, false);
} else {
gvt_dbg_el("emulate as pending slot\n");
/*
* otherwise
* --> emulate pending execlist exist + but no preemption case
*/
execlist->pending_slot = slot;
emulate_execlist_status(execlist);
}
return 0;
}
static void free_workload(struct intel_vgpu_workload *workload)
{
intel_vgpu_unpin_mm(workload->shadow_mm);
intel_gvt_mm_unreference(workload->shadow_mm);
kmem_cache_free(workload->vgpu->workloads, workload);
}
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
unsigned long add, int gmadr_bytes)
{
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
return -1;
*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 8) {
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
add & BATCH_BUFFER_ADDR_HIGH_MASK;
}
return 0;
}
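/*
 * Pin every shadow batch buffer object into GGTT and patch the gma in the
 * shadowed batch buffer start command so it points at the shadow copy.
 */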
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
/* pin the gem object to ggtt */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
struct i915_vma *vma;
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
4, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return;
}
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/
/* update the relocate gma with shadow batch buffer*/
set_gma_to_bb_cmd(entry_obj,
i915_ggtt_offset(vma),
gmadr_bytes);
}
}
}
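/*
 * Patch the per-context and RCS indirect context pointers in the shadow
 * ring context so that they reference the shadow copies in GGTT.
 */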
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ring_id = wa_ctx->workload->ring_id;
struct i915_gem_context *shadow_ctx =
wa_ctx->workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
kunmap_atomic(shadow_ring_context);
return 0;
}
static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
return;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin indirect ctx obj\n");
return;
}
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/
wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
}
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
intel_vgpu_pin_mm(workload->shadow_mm);
intel_vgpu_sync_oos_pages(workload->vgpu);
intel_vgpu_flush_post_shadow(workload->vgpu);
prepare_shadow_batch_buffer(workload);
prepare_shadow_wa_ctx(&workload->wa_ctx);
if (!workload->emulate_schedule_in)
return 0;
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
/* release all the shadow batch buffers */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
i915_gem_object_unpin_map(entry_obj->obj);
i915_gem_object_put(entry_obj->obj);
list_del(&entry_obj->list);
kfree(entry_obj);
}
}
}
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (wa_ctx->indirect_ctx.size == 0)
return;
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_execlist *execlist =
&vgpu->execlist[workload->ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
bool lite_restore = false;
int ret;
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
if (workload->status || vgpu->resetting)
goto out;
if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
struct intel_vgpu_workload, list);
this_desc = &workload->ctx_desc;
next_desc = &next_workload->ctx_desc;
lite_restore = same_context(this_desc, next_desc);
}
if (lite_restore) {
gvt_dbg_el("next context == current - no schedule-out\n");
free_workload(workload);
return 0;
}
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
if (ret)
goto err;
out:
free_workload(workload);
return 0;
err:
free_workload(workload);
return ret;
}
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{
u64 gpa;
int i;
gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
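/*
 * Pick the PPGTT page table level from the context descriptor's addressing
 * mode, read the guest PDPs from the ring context, then find or create the
 * matching shadow mm for the workload.
 */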
static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
int page_table_level;
u32 pdp[8];
if (desc->addressing_mode == 1) { /* legacy 32-bit */
page_table_level = 3;
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
gvt_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
workload->shadow_mm = mm;
return 0;
}
#define get_last_workload(q) \
(list_empty(q) ? NULL : container_of(q->prev, \
struct intel_vgpu_workload, list))
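/*
 * Build a workload from a guest execlist context: translate the context
 * LRCA into a GPA, read the ring head/tail/start/ctl (plus the wa_ctx
 * pointers on RCS), reuse the previous tail as head when the same context
 * is resubmitted, set up the shadow mm and queue the workload.
 */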
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
gvt_dbg_el("ctx head %x real head %lx\n", head,
last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
*/
head = last_workload->rb_tail;
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
if (!workload)
return -ENOMEM;
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
workload->vgpu = vgpu;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.workload = workload;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
if (emulate_schedule_in)
memcpy(&workload->elsp_dwords,
&vgpu->execlist[ring_id].elsp_dwords,
sizeof(workload->elsp_dwords));
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);
ret = prepare_mm(workload);
if (ret) {
kmem_cache_free(vgpu->workloads, workload);
return ret;
}
queue_workload(workload);
return 0;
}
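/*
 * Entry point for a guest ELSP submission: validate the two descriptors
 * (both must be privileged and descriptor 0 must be valid), then submit
 * one workload per valid descriptor, emulating schedule-in only for the
 * first one.
 */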
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
unsigned long valid_desc_bitmap = 0;
bool emulate_schedule_in = true;
int ret;
int i;
memset(valid_desc, 0, sizeof(valid_desc));
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
for (i = 0; i < 2; i++) {
if (!desc[i]->valid)
continue;
if (!desc[i]->privilege_access) {
gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
vgpu->id);
return -EINVAL;
}
/* TODO: add more guest context checks here. */
set_bit(i, &valid_desc_bitmap);
valid_desc[i] = *desc[i];
}
if (!valid_desc_bitmap) {
gvt_err("vgpu%d: no valid desc in a elsp submission\n",
vgpu->id);
return -EINVAL;
}
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) {
gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
vgpu->id);
return -EINVAL;
}
/* submit workload */
for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in);
if (ret) {
gvt_err("vgpu%d: fail to schedule workload\n",
vgpu->id);
return ret;
}
emulate_schedule_in = false;
}
return 0;
}
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
execlist->ring_id = ring_id;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
kmem_cache_destroy(vgpu->workloads);
}
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
/* each ring has a virtual execlist engine */
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
init_vgpu_execlist(vgpu, i);
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}
vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!vgpu->workloads)
return -ENOMEM;
return 0;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap)
{
int bit;
struct list_head *pos, *n;
struct intel_vgpu_workload *workload = NULL;
for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
if (bit >= I915_NUM_ENGINES)
break;
/* free the unsubmitted workloads in the queue */
list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
workload = container_of(pos,
struct intel_vgpu_workload, list);
list_del_init(&workload->list);
free_workload(workload);
}
init_vgpu_execlist(vgpu, bit);
}
}

View File

@ -0,0 +1,188 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#ifndef _GVT_EXECLIST_H_
#define _GVT_EXECLIST_H_
struct execlist_ctx_descriptor_format {
union {
u32 udw;
u32 context_id;
};
union {
u32 ldw;
struct {
u32 valid : 1;
u32 force_pd_restore : 1;
u32 force_restore : 1;
u32 addressing_mode : 2;
u32 llc_coherency : 1;
u32 fault_handling : 2;
u32 privilege_access : 1;
u32 reserved : 3;
u32 lrca : 20;
};
};
};
struct execlist_status_format {
union {
u32 ldw;
struct {
u32 current_execlist_pointer :1;
u32 execlist_write_pointer :1;
u32 execlist_queue_full :1;
u32 execlist_1_valid :1;
u32 execlist_0_valid :1;
u32 last_ctx_switch_reason :9;
u32 current_active_elm_status :2;
u32 arbitration_enable :1;
u32 execlist_1_active :1;
u32 execlist_0_active :1;
u32 reserved :13;
};
};
union {
u32 udw;
u32 context_id;
};
};
struct execlist_context_status_pointer_format {
union {
u32 dw;
struct {
u32 write_ptr :3;
u32 reserved :5;
u32 read_ptr :3;
u32 reserved2 :5;
u32 mask :16;
};
};
};
struct execlist_context_status_format {
union {
u32 ldw;
struct {
u32 idle_to_active :1;
u32 preempted :1;
u32 element_switch :1;
u32 active_to_idle :1;
u32 context_complete :1;
u32 wait_on_sync_flip :1;
u32 wait_on_vblank :1;
u32 wait_on_semaphore :1;
u32 wait_on_scanline :1;
u32 reserved :2;
u32 semaphore_wait_mode :1;
u32 display_plane :3;
u32 lite_restore :1;
u32 reserved_2 :16;
};
};
union {
u32 udw;
u32 context_id;
};
};
struct execlist_mmio_pair {
u32 addr;
u32 val;
};
/* The first 52 dwords in register state context */
struct execlist_ring_context {
u32 nop1;
u32 lri_cmd_1;
struct execlist_mmio_pair ctx_ctrl;
struct execlist_mmio_pair ring_header;
struct execlist_mmio_pair ring_tail;
struct execlist_mmio_pair rb_start;
struct execlist_mmio_pair rb_ctrl;
struct execlist_mmio_pair bb_cur_head_UDW;
struct execlist_mmio_pair bb_cur_head_LDW;
struct execlist_mmio_pair bb_state;
struct execlist_mmio_pair second_bb_addr_UDW;
struct execlist_mmio_pair second_bb_addr_LDW;
struct execlist_mmio_pair second_bb_state;
struct execlist_mmio_pair bb_per_ctx_ptr;
struct execlist_mmio_pair rcs_indirect_ctx;
struct execlist_mmio_pair rcs_indirect_ctx_offset;
u32 nop2;
u32 nop3;
u32 nop4;
u32 lri_cmd_2;
struct execlist_mmio_pair ctx_timestamp;
struct execlist_mmio_pair pdp3_UDW;
struct execlist_mmio_pair pdp3_LDW;
struct execlist_mmio_pair pdp2_UDW;
struct execlist_mmio_pair pdp2_LDW;
struct execlist_mmio_pair pdp1_UDW;
struct execlist_mmio_pair pdp1_LDW;
struct execlist_mmio_pair pdp0_UDW;
struct execlist_mmio_pair pdp0_LDW;
};
struct intel_vgpu_elsp_dwords {
u32 data[4];
u32 index;
};
struct intel_vgpu_execlist_slot {
struct execlist_ctx_descriptor_format ctx[2];
u32 index;
};
struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot slot[2];
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap);
#endif /*_GVT_EXECLIST_H_*/

View File

@ -0,0 +1,312 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Changbin Du <changbin.du@intel.com>
*
*/
#include <linux/firmware.h>
#include <linux/crc32.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#define FIRMWARE_VERSION (0x0)
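/*
 * Layout of the golden HW state file: this header, followed by the saved
 * PCI config space at cfg_space_offset and the saved MMIO state at
 * mmio_offset. The crc32 field protects everything after itself.
 */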
struct gvt_firmware_header {
u64 magic;
u32 crc32; /* protect the data after this field */
u32 version;
u64 cfg_space_size;
u64 cfg_space_offset; /* offset in the file */
u64 mmio_size;
u64 mmio_offset; /* offset in the file */
unsigned char data[1];
};
#define RD(offset) (readl(mmio + offset.reg))
#define WR(v, offset) (writel(v, mmio + offset.reg))
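/* Grab forcewake so the golden MMIO state is read from awake hardware. */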
static void bdw_forcewake_get(void __iomem *mmio)
{
WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
RD(ECOBUS);
if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
gvt_err("fail to wait forcewake idle\n");
WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
gvt_err("fail to wait forcewake ack\n");
if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
gvt_err("fail to wait c0 wake up\n");
}
#undef RD
#undef WR
#define dev_to_drm_minor(d) dev_get_drvdata((d))
static ssize_t
gvt_firmware_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
memcpy(buf, attr->private + offset, count);
return count;
}
static struct bin_attribute firmware_attr = {
.attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
.read = gvt_firmware_read,
.write = NULL,
.mmap = NULL,
};
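/*
 * No golden firmware file is available: snapshot the device's current
 * config space and MMIO registers in the firmware layout and expose the
 * blob as the "gvt_firmware" sysfs bin attribute so it can be saved for
 * later use.
 */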
static int expose_firmware_sysfs(struct intel_gvt *gvt,
void __iomem *mmio)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size;
int i;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
firmware = vmalloc(size);
if (!firmware)
return -ENOMEM;
h = firmware;
h->magic = VGT_MAGIC;
h->version = FIRMWARE_VERSION;
h->cfg_space_size = info->cfg_space_size;
h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
h->mmio_size = info->mmio_size;
h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
p = firmware + h->cfg_space_offset;
for (i = 0; i < h->cfg_space_size; i += 4)
pci_read_config_dword(pdev, i, p + i);
memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
p = firmware + h->mmio_offset;
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
int j;
for (j = 0; j < e->length; j += 4)
*(u32 *)(p + e->offset + j) =
readl(mmio + e->offset + j);
}
memcpy(gvt->firmware.mmio, p, info->mmio_size);
firmware_attr.size = size;
firmware_attr.private = firmware;
ret = device_create_bin_file(&pdev->dev, &firmware_attr);
if (ret) {
vfree(firmware);
return ret;
}
return 0;
}
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
}
/**
* intel_gvt_free_firmware - free GVT firmware
* @gvt: intel gvt device
*
*/
void intel_gvt_free_firmware(struct intel_gvt *gvt)
{
if (!gvt->firmware.firmware_loaded)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
kfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
const char *item;
u64 file, request;
h = (struct gvt_firmware_header *)fw->data;
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
item = (s); file = (u64)(a); request = (u64)(b); \
if ((a) != (b)) \
goto invalid_firmware; \
} while (0)
VERIFY("magic number", h->magic, VGT_MAGIC);
VERIFY("version", h->version, FIRMWARE_VERSION);
VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
VERIFY("mmio size", h->mmio_size, info->mmio_size);
mem = (fw->data + h->cfg_space_offset);
id = *(u16 *)(mem + PCI_VENDOR_ID);
VERIFY("vender id", id, pdev->vendor);
id = *(u16 *)(mem + PCI_DEVICE_ID);
VERIFY("device id", id, pdev->device);
id = *(u8 *)(mem + PCI_REVISION_ID);
VERIFY("revision id", id, pdev->revision);
#undef VERIFY
return 0;
invalid_firmware:
gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
item, file, request);
return -EINVAL;
}
#define GVT_FIRMWARE_PATH "i915/gvt"
/**
* intel_gvt_load_firmware - load GVT firmware
* @gvt: intel gvt device
*
*/
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
char *path;
void __iomem *mmio;
void *mem;
int ret;
path = kmalloc(PATH_MAX, GFP_KERNEL);
if (!path)
return -ENOMEM;
mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
if (!mem) {
kfree(path);
return -ENOMEM;
}
firmware->cfg_space = mem;
mem = kmalloc(info->mmio_size, GFP_KERNEL);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
return -ENOMEM;
}
firmware->mmio = mem;
mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
if (!mmio) {
kfree(path);
kfree(firmware->cfg_space);
kfree(firmware->mmio);
return -EINVAL;
}
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
bdw_forcewake_get(mmio);
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);
gvt_dbg_core("request hw state firmware %s...\n", path);
ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
kfree(path);
if (ret)
goto expose_firmware;
gvt_dbg_core("success.\n");
ret = verify_firmware(gvt, fw);
if (ret)
goto out_free_fw;
gvt_dbg_core("verified.\n");
h = (struct gvt_firmware_header *)fw->data;
memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
h->cfg_space_size);
memcpy(firmware->mmio, fw->data + h->mmio_offset,
h->mmio_size);
release_firmware(fw);
firmware->firmware_loaded = true;
pci_iounmap(pdev, mmio);
return 0;
out_free_fw:
release_firmware(fw);
expose_firmware:
expose_firmware_sysfs(gvt, mmio);
pci_iounmap(pdev, mmio);
return 0;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,270 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Xiao Zheng <xiao.zheng@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
#define GTT_PAGE_SHIFT 12
#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
struct intel_vgpu_mm;
#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
u64 val64;
int type;
};
struct intel_gvt_gtt_pte_ops {
struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu);
struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
struct intel_gvt_gtt_gma_ops {
unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
unsigned long (*gma_to_pte_index)(unsigned long gma);
unsigned long (*gma_to_pde_index)(unsigned long gma);
unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
unsigned long (*gma_to_pml4_index)(unsigned long gma);
};
struct intel_gvt_gtt {
struct intel_gvt_gtt_pte_ops *pte_ops;
struct intel_gvt_gtt_gma_ops *gma_ops;
int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
};
enum {
INTEL_GVT_MM_GGTT = 0,
INTEL_GVT_MM_PPGTT,
};
struct intel_vgpu_mm {
int type;
bool initialized;
bool shadowed;
int page_table_entry_type;
u32 page_table_entry_size;
u32 page_table_entry_cnt;
void *virtual_page_table;
void *shadow_page_table;
int page_table_level;
bool has_shadow_page_table;
u32 pde_base_index;
struct list_head list;
struct kref ref;
atomic_t pincount;
struct list_head lru_list;
struct intel_vgpu *vgpu;
};
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
#define ggtt_get_guest_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
#define ggtt_set_guest_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
#define ggtt_get_shadow_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
#define ggtt_set_shadow_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
#define ppgtt_get_guest_root_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
#define ppgtt_set_guest_root_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
#define ppgtt_get_shadow_root_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
#define ppgtt_set_shadow_root_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
int mm_type, void *virtual_page_table, int page_table_level,
u32 pde_base_index);
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
struct intel_vgpu_guest_page;
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
struct list_head mm_list_head;
DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
atomic_t n_write_protected_guest_page;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct page *scratch_page;
unsigned long scratch_page_mfn;
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
struct intel_vgpu_oos_page;
struct intel_vgpu_shadow_page {
void *vaddr;
struct page *page;
int type;
struct hlist_node node;
unsigned long mfn;
};
struct intel_vgpu_guest_page {
struct hlist_node node;
bool writeprotection;
unsigned long gfn;
int (*handler)(void *, u64, void *, int);
void *data;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
};
struct intel_vgpu_oos_page {
struct intel_vgpu_guest_page *guest_page;
struct list_head list;
struct list_head vm_list;
int id;
unsigned char mem[GTT_PAGE_SIZE];
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
struct intel_vgpu_ppgtt_spt {
struct intel_vgpu_shadow_page shadow_page;
struct intel_vgpu_guest_page guest_page;
int guest_page_type;
atomic_t refcount;
struct intel_vgpu *vgpu;
DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
struct list_head post_shadow_list;
};
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page,
unsigned long gfn,
int (*handler)(void *gp, u64, void *, int),
void *data);
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
{
kref_get(&mm->ref);
}
static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
{
kref_put(&mm->ref, intel_vgpu_destroy_mm);
}
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
unsigned long gma);
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level);
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level);
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
#endif /* _GVT_GTT_H_ */

View File

@ -19,12 +19,23 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include <linux/types.h>
#include <xen/xen.h>
#include <linux/kthread.h>
#include "i915_drv.h"
#include "gvt.h"
struct intel_gvt_host intel_gvt_host;
@ -33,6 +44,13 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
};
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
* @gvt: intel gvt device
@ -84,9 +102,66 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
if (IS_BROADWELL(gvt->dev_priv))
gvt->device_info.max_support_vgpus = 8;
/* This function will grow large in GVT device model patches. */
struct intel_gvt_device_info *info = &gvt->device_info;
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
}
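/*
 * The GVT service thread sleeps until a request bit is set via
 * intel_gvt_request_service() and then handles it under gvt->lock; only
 * vblank emulation requests are handled so far.
 */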
static int gvt_service_thread(void *data)
{
struct intel_gvt *gvt = (struct intel_gvt *)data;
int ret;
gvt_dbg_core("service thread start\n");
while (!kthread_should_stop()) {
ret = wait_event_interruptible(gvt->service_thread_wq,
kthread_should_stop() || gvt->service_request);
if (kthread_should_stop())
break;
if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
(void *)&gvt->service_request)) {
mutex_lock(&gvt->lock);
intel_gvt_emulate_vblank(gvt);
mutex_unlock(&gvt->lock);
}
}
return 0;
}
static void clean_service_thread(struct intel_gvt *gvt)
{
kthread_stop(gvt->service_thread);
}
static int init_service_thread(struct intel_gvt *gvt)
{
init_waitqueue_head(&gvt->service_thread_wq);
gvt->service_thread = kthread_run(gvt_service_thread,
gvt, "gvt_service_thread");
if (IS_ERR(gvt->service_thread)) {
gvt_err("fail to start service thread.\n");
return PTR_ERR(gvt->service_thread);
}
return 0;
}
/**
@ -99,14 +174,23 @@ static void init_device_info(struct intel_gvt *gvt)
*/
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
struct intel_gvt *gvt = &dev_priv->gvt;
struct intel_gvt *gvt = to_gvt(dev_priv);
if (WARN_ON(!gvt->initialized))
if (WARN_ON(!gvt))
return;
/* Other de-initialization of GVT components will be introduced. */
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
gvt->initialized = false;
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
/**
@ -122,7 +206,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
*/
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
struct intel_gvt *gvt = &dev_priv->gvt;
struct intel_gvt *gvt;
int ret;
/*
* Cannot initialize the GVT device without intel_gvt_host being
* initialized first.
@ -130,16 +216,76 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!intel_gvt_host.initialized))
return -EINVAL;
if (WARN_ON(gvt->initialized))
if (WARN_ON(dev_priv->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
if (!gvt)
return -ENOMEM;
gvt_dbg_core("init gvt device\n");
mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
/*
* Other initialization of GVT components will be introduce here.
*/
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
return ret;
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
ret = intel_gvt_init_irq(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_gtt(gvt);
if (ret)
goto out_clean_irq;
ret = intel_gvt_init_opregion(gvt);
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_opregion;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
goto out_clean_workload_scheduler;
ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;
gvt_dbg_core("gvt device creation is done\n");
gvt->initialized = true;
dev_priv->gvt = gvt;
return 0;
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_opregion:
intel_gvt_clean_opregion(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_clean_irq:
intel_gvt_clean_irq(gvt);
out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
kfree(gvt);
return ret;
}

View File

@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_H_
@ -26,6 +35,17 @@
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
@ -45,25 +65,324 @@ extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
/* This data structure will grow bigger in GVT device model patches */
u32 cfg_space_size;
u32 mmio_size;
u32 mmio_bar;
unsigned long msi_cap_offset;
u32 gtt_start_offset;
u32 gtt_entry_size;
u32 gtt_entry_size_shift;
int gmadr_bytes_in_cmd;
u32 max_surface_size;
};
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
#define INTEL_GVT_MAX_NUM_FENCES 32
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
u32 base;
u32 size;
};
struct intel_vgpu_mmio {
void *vreg;
void *sreg;
bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4
struct intel_vgpu_pci_bar {
u64 size;
bool tracked;
};
struct intel_vgpu_cfg_space {
unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
#define INTEL_GVT_MAX_PIPE 4
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
INTEL_GVT_EVENT_MAX);
};
struct intel_vgpu_opregion {
void *va;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
struct page *pages[INTEL_GVT_OPREGION_PAGES];
};
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
#define INTEL_GVT_MAX_PORT 5
struct intel_vgpu_display {
struct intel_vgpu_i2c_edid i2c_edid;
struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool resetting;
void *sched_data;
struct intel_vgpu_fence fence;
struct intel_vgpu_gm gm;
struct intel_vgpu_cfg_space cfg_space;
struct intel_vgpu_mmio mmio;
struct intel_vgpu_irq irq;
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
};
struct intel_gvt_gm {
unsigned long vgpu_allocated_low_gm_size;
unsigned long vgpu_allocated_high_gm_size;
};
struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
#define INTEL_GVT_MMIO_HASH_BITS 9
struct intel_gvt_mmio {
u32 *mmio_attribute;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};
struct intel_gvt_firmware {
void *cfg_space;
void *mmio;
bool firmware_loaded;
};
struct intel_gvt_opregion {
void __iomem *opregion_va;
u32 opregion_pa;
};
struct intel_gvt {
struct mutex lock;
bool initialized;
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
struct intel_gvt_gm gm;
struct intel_gvt_fence fence;
struct intel_gvt_mmio mmio;
struct intel_gvt_firmware firmware;
struct intel_gvt_irq irq;
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
unsigned long service_request;
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
return i915->gvt;
}
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
int service)
{
set_bit(service, (void *)&gvt->service_request);
wake_up(&gvt->service_thread_wq);
}
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+ gvt_aperture_sz(gvt) - 1)
#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+ gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
#define vgpu_aperture_pa_base(vgpu) \
(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
#define vgpu_aperture_pa_end(vgpu) \
(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
__s32 primary;
__u64 vgpu_id;
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow register */
#define vgpu_vreg(vgpu, reg) \
(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
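/*
 * Usage sketch (editor's illustration, not in the original patch): these
 * accessors take either an i915_reg_t or a raw u32 offset, since
 * INTEL_GVT_MMIO_OFFSET() only consumes the first 32 bits of @reg, e.g.
 *
 *   vgpu_vreg(vgpu, GEN8_MASTER_IRQ) |= GEN8_MASTER_IRQ_CONTROL;
 *   u8 b = vgpu_vreg8(vgpu, 0x1000);   hypothetical raw offset
 */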
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
for_each_if(vgpu->active)
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
u32 offset, u32 val, bool low)
{
u32 *pval;
/* BAR offset should be 32 bits aligned */
offset = rounddown(offset, 4);
pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
if (low) {
/*
* only update bit 31 - bit 4,
* leave the bit 3 - bit 0 unchanged.
*/
*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
}
}
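/*
 * Worked example (editor's illustration): if the stored low BAR dword is
 * 0x0000000c (a 64-bit, prefetchable memory BAR) and the guest writes
 * 0xf0000000, the read-only type bits 3:0 are preserved, so the helper
 * stores (0xf0000000 & GENMASK(31, 4)) | (0xc & GENMASK(3, 0)) = 0xf000000c.
 */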
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *
param);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
(gmadr <= vgpu_aperture_gmadr_end(vgpu)))
#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
(gmadr <= vgpu_hidden_gmadr_end(vgpu)))
#define vgpu_gmadr_is_valid(vgpu, gmadr) \
((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
(vgpu_gmadr_is_hidden(vgpu, gmadr))))
#define gvt_gmadr_is_aperture(gvt, gmadr) \
((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
(gmadr <= gvt_aperture_gmadr_end(gvt)))
#define gvt_gmadr_is_hidden(gvt, gmadr) \
((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
(gmadr <= gvt_hidden_gmadr_end(gvt)))
#define gvt_gmadr_is_valid(gvt, gmadr) \
(gvt_gmadr_is_aperture(gvt, gmadr) || \
gvt_gmadr_is_hidden(gvt, gmadr))
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
#include "mpt.h"
#endif

File diff suppressed because it is too large Load Diff

View File

@ -19,17 +19,51 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
struct intel_gvt_io_emulation_ops {
int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
};
extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*set_wp_page)(unsigned long handle, u64 gfn);
int (*unset_wp_page)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map,
int type);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
};
extern struct intel_gvt_mpt xengt_mpt;

View File

@ -0,0 +1,741 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min he <min.he@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
/* common offset among interrupt control registers */
#define regbase_to_isr(base) (base)
#define regbase_to_imr(base) (base + 0x4)
#define regbase_to_iir(base) (base + 0x8)
#define regbase_to_ier(base) (base + 0xC)
#define iir_to_regbase(iir) (iir - 0x8)
#define ier_to_regbase(ier) (ier - 0xC)
#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
#define get_irq_info(irq, e) (irq->events[e].info)
#define irq_to_gvt(irq) \
container_of(irq, struct intel_gvt, irq)
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info);
static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
[RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
[RCS_DEBUG] = "Render EU debug from SVG",
[RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
[RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
[RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
[RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
[RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
[RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
[VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
[VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
[VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
[VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
[VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
[VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
[VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
[VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
[VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
[VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
[BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
[BCS_MMIO_SYNC_FLUSH] = "Blitter MMIO sync flush status",
[BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
[BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
[BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
[BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
[VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
[VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
[PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
[PIPE_A_CRC_ERR] = "Pipe A CRC error",
[PIPE_A_CRC_DONE] = "Pipe A CRC done",
[PIPE_A_VSYNC] = "Pipe A vsync",
[PIPE_A_LINE_COMPARE] = "Pipe A line compare",
[PIPE_A_ODD_FIELD] = "Pipe A odd field",
[PIPE_A_EVEN_FIELD] = "Pipe A even field",
[PIPE_A_VBLANK] = "Pipe A vblank",
[PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
[PIPE_B_CRC_ERR] = "Pipe B CRC error",
[PIPE_B_CRC_DONE] = "Pipe B CRC done",
[PIPE_B_VSYNC] = "Pipe B vsync",
[PIPE_B_LINE_COMPARE] = "Pipe B line compare",
[PIPE_B_ODD_FIELD] = "Pipe B odd field",
[PIPE_B_EVEN_FIELD] = "Pipe B even field",
[PIPE_B_VBLANK] = "Pipe B vblank",
[PIPE_C_VBLANK] = "Pipe C vblank",
[DPST_PHASE_IN] = "DPST phase in event",
[DPST_HISTOGRAM] = "DPST histogram event",
[GSE] = "GSE",
[DP_A_HOTPLUG] = "DP A Hotplug",
[AUX_CHANNEL_A] = "AUX Channel A",
[PERF_COUNTER] = "Performance counter",
[POISON] = "Poison",
[GTT_FAULT] = "GTT fault",
[PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
[PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
[PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
[SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
[SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
[SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
[PCU_THERMAL] = "PCU Thermal Event",
[PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
[FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
[AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
[AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
[FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
[AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
[AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
[ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
[GMBUS] = "Gmbus",
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
[CRT_HOTPLUG] = "CRT Hotplug",
[DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
[DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
[DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
[AUX_CHANNEL_B] = "AUX Channel B",
[AUX_CHANNEL_C] = "AUX Channel C",
[AUX_CHANNEL_D] = "AUX Channel D",
[AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
[AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
[AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
[INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
};
static inline struct intel_gvt_irq_info *regbase_to_irq_info(
struct intel_gvt *gvt,
unsigned int reg)
{
struct intel_gvt_irq *irq = &gvt->irq;
int i;
for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
return irq->info[i];
}
return NULL;
}
/**
* intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the generic IMR register bit change
* behavior.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 changed, masked, unmasked;
u32 imr = *(u32 *)p_data;
gvt_dbg_irq("write IMR %x with val %x\n",
reg, imr);
gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
/* figure out newly masked/unmasked bits */
changed = vgpu_vreg(vgpu, reg) ^ imr;
masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
unmasked = masked ^ changed;
gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
changed, masked, unmasked);
vgpu_vreg(vgpu, reg) = imr;
ops->check_pending_irq(vgpu);
gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
return 0;
}
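/*
 * Worked example (editor's illustration): with an old vIMR of 0x00f0 and
 * a guest write of 0x0f00,
 *   changed  = 0x00f0 ^ 0x0f00            = 0x0ff0
 *   masked   = (0x00f0 & 0x0ff0) ^ 0x0ff0 = 0x0f00  (newly masked)
 *   unmasked = 0x0f00 ^ 0x0ff0            = 0x00f0  (newly unmasked)
 * i.e. bits 11:8 become masked while bits 7:4 become unmasked.
 */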
/**
* intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the master IRQ register on gen8+.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
u32 virtual_ier = vgpu_vreg(vgpu, reg);
gvt_dbg_irq("write master irq reg %x with val %x\n",
reg, ier);
gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
/*
* GEN8_MASTER_IRQ is a special irq register,
* only bit 31 is allowed to be modified
* and treated as an IER bit.
*/
ier &= GEN8_MASTER_IRQ_CONTROL;
virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
vgpu_vreg(vgpu, reg) |= ier;
/* figure out newly enabled/disabled bits */
changed = virtual_ier ^ ier;
enabled = (virtual_ier & changed) ^ changed;
disabled = enabled ^ changed;
gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
changed, enabled, disabled);
ops->check_pending_irq(vgpu);
gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
return 0;
}
/**
* intel_vgpu_reg_ier_handler - Generic IER write emulation handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the generic IER register behavior.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
gvt_dbg_irq("write IER %x with val %x\n",
reg, ier);
gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
/* figure out newly enabled/disabled bits */
changed = vgpu_vreg(vgpu, reg) ^ ier;
enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
disabled = enabled ^ changed;
gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
changed, enabled, disabled);
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
if (WARN_ON(!info))
return -EINVAL;
if (info->has_upstream_irq)
update_upstream_irq(vgpu, info);
ops->check_pending_irq(vgpu);
gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
return 0;
}
/**
* intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the generic IIR register behavior.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
if (WARN_ON(!info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
if (info->has_upstream_irq)
update_upstream_irq(vgpu, info);
return 0;
}
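/*
 * Worked example (editor's illustration): IIR bits are write-one-to-clear,
 * so if the virtual IIR holds 0x9 and the guest writes 0x1, the handler
 * leaves 0x8 pending (vreg &= ~iir) and then re-evaluates the upstream
 * cascade if this register feeds one.
 */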
static struct intel_gvt_irq_map gen8_irq_map[] = {
{ INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
{ INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
{ INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
{ -1, -1, ~0 },
};
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
u32 set_bits = 0;
u32 clear_bits = 0;
int bit;
u32 val = vgpu_vreg(vgpu,
regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
& vgpu_vreg(vgpu,
regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
if (!info->has_upstream_irq)
return;
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
if (info->group != map->down_irq_group)
continue;
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
if (val & map->down_irq_bitmask)
set_bits |= (1 << bit);
else
clear_bits |= (1 << bit);
}
WARN_ON(!up_irq_info);
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
vgpu_vreg(vgpu, isr) &= ~clear_bits;
vgpu_vreg(vgpu, isr) |= set_bits;
} else {
u32 iir = regbase_to_iir(
i915_mmio_reg_offset(up_irq_info->reg_base));
u32 imr = regbase_to_imr(
i915_mmio_reg_offset(up_irq_info->reg_base));
vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
}
if (up_irq_info->has_upstream_irq)
update_upstream_irq(vgpu, up_irq_info);
}
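/*
 * Editor's note (illustrative): the cascade follows gen8_irq_map above.
 * For example, a bit that is both pending in the DE pipe A IIR and
 * enabled in its IER sets bit 16 of the virtual GEN8_MASTER_IRQ register,
 * per the { INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 }
 * entry, so the guest's top-level handler knows which leaf register to
 * inspect next.
 */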
static void init_irq_map(struct intel_gvt_irq *irq)
{
struct intel_gvt_irq_map *map;
struct intel_gvt_irq_info *up_info, *down_info;
int up_bit;
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
up_info = irq->info[map->up_irq_group];
up_bit = map->up_irq_bit;
down_info = irq->info[map->down_irq_group];
set_bit(up_bit, up_info->downstream_irq_bitmap);
down_info->has_upstream_irq = true;
gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
up_info->group, up_bit,
down_info->group, map->down_irq_bitmask);
}
}
/* =======================vEvent injection===================== */
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
return intel_gvt_hypervisor_inject_msi(vgpu);
}
static void propagate_event(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
{
struct intel_gvt_irq_info *info;
unsigned int reg_base;
int bit;
info = get_irq_info(irq, event);
if (WARN_ON(!info))
return;
reg_base = i915_mmio_reg_offset(info->reg_base);
bit = irq->events[event].bit;
if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
regbase_to_imr(reg_base)))) {
gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
bit, irq_name[event], vgpu->id);
set_bit(bit, (void *)&vgpu_vreg(vgpu,
regbase_to_iir(reg_base)));
}
}
/* =======================vEvent Handlers===================== */
static void handle_default_event_virt(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
{
if (!vgpu->irq.irq_warn_once[event]) {
gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
vgpu->id, event, irq_name[event]);
vgpu->irq.irq_warn_once[event] = true;
}
propagate_event(irq, event, vgpu);
}
/* =====================GEN specific logic======================= */
/* GEN8 interrupt routines. */
#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
static struct intel_gvt_irq_info gen8_##regname##_info = { \
.name = #regname"-IRQ", \
.reg_base = (regbase), \
.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
INTEL_GVT_EVENT_RESERVED}, \
}
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
static struct intel_gvt_irq_info gvt_base_pch_info = {
.name = "PCH-IRQ",
.reg_base = SDEISR,
.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
INTEL_GVT_EVENT_RESERVED},
};
static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
{
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
int i;
if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
GEN8_MASTER_IRQ_CONTROL))
return;
for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
struct intel_gvt_irq_info *info = irq->info[i];
u32 reg_base;
if (!info->has_upstream_irq)
continue;
reg_base = i915_mmio_reg_offset(info->reg_base);
if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
& vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
update_upstream_irq(vgpu, info);
}
if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
& ~GEN8_MASTER_IRQ_CONTROL)
inject_virtual_interrupt(vgpu);
}
static void gen8_init_irq(
struct intel_gvt_irq *irq)
{
struct intel_gvt *gvt = irq_to_gvt(irq);
#define SET_BIT_INFO(s, b, e, i) \
do { \
s->events[e].bit = b; \
s->events[e].info = s->info[i]; \
s->info[i]->bit_to_event[b] = e;\
} while (0)
#define SET_IRQ_GROUP(s, g, i) \
do { \
s->info[g] = i; \
(i)->group = g; \
set_bit(g, s->irq_info_bitmap); \
} while (0)
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
/* GEN8 level 2 interrupts. */
/* GEN8 interrupt GT0 events */
SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
/* GEN8 interrupt GT1 events */
SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
if (HAS_BSD2(gvt->dev_priv)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
INTEL_GVT_IRQ_INFO_GT1);
}
/* GEN8 interrupt GT3 events */
SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
/* GEN8 interrupt DE PORT events */
SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
/* GEN8 interrupt DE MISC events */
SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
/* PCH events */
SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
if (IS_BROADWELL(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
} else if (IS_SKYLAKE(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
}
/* GEN8 interrupt PCU events */
SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
}
static struct intel_gvt_irq_ops gen8_irq_ops = {
.init_irq = gen8_init_irq,
.check_pending_irq = gen8_check_pending_irq,
};
/**
* intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
* @vgpu: a vGPU
* @event: interrupt event
*
* This function is used to trigger a virtual interrupt event for vGPU.
* The caller provides the event to be triggered, and the framework itself
* will emulate the IRQ register bit change.
*
*/
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
WARN_ON(!handler);
handler(irq, event, vgpu);
ops->check_pending_irq(vgpu);
}
static void init_events(
struct intel_gvt_irq *irq)
{
int i;
for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
irq->events[i].info = NULL;
irq->events[i].v_handler = handle_default_event_virt;
}
}
static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
{
struct intel_gvt_vblank_timer *vblank_timer;
struct intel_gvt_irq *irq;
struct intel_gvt *gvt;
vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
gvt = container_of(irq, struct intel_gvt, irq);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
return HRTIMER_RESTART;
}
/**
* intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
* @gvt: a GVT device
*
* This function is called at driver unloading stage, to clean up GVT-g IRQ
* emulation subsystem.
*
*/
void intel_gvt_clean_irq(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
hrtimer_cancel(&irq->vblank_timer.timer);
}
#define VBLANK_TIMER_PERIOD 16000000
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
* @gvt: a GVT device
*
* This function is called at driver loading stage, to initialize the GVT-g IRQ
* emulation subsystem.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_init_irq(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
gvt_dbg_core("init irq framework\n");
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
irq->ops = &gen8_irq_ops;
irq->irq_map = gen8_irq_map;
} else {
WARN_ON(1);
return -ENODEV;
}
/* common event initialization */
init_events(irq);
/* gen specific initialization */
irq->ops->init_irq(irq);
init_irq_map(irq);
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->timer.function = vblank_timer_fn;
vblank_timer->period = VBLANK_TIMER_PERIOD;
return 0;
}

View File

@ -0,0 +1,233 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min he <min.he@intel.com>
*
*/
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
RCS_DEBUG,
RCS_MMIO_SYNC_FLUSH,
RCS_CMD_STREAMER_ERR,
RCS_PIPE_CONTROL,
RCS_L3_PARITY_ERR,
RCS_WATCHDOG_EXCEEDED,
RCS_PAGE_DIRECTORY_FAULT,
RCS_AS_CONTEXT_SWITCH,
RCS_MONITOR_BUFF_HALF_FULL,
VCS_MI_USER_INTERRUPT,
VCS_MMIO_SYNC_FLUSH,
VCS_CMD_STREAMER_ERR,
VCS_MI_FLUSH_DW,
VCS_WATCHDOG_EXCEEDED,
VCS_PAGE_DIRECTORY_FAULT,
VCS_AS_CONTEXT_SWITCH,
VCS2_MI_USER_INTERRUPT,
VCS2_MI_FLUSH_DW,
VCS2_AS_CONTEXT_SWITCH,
BCS_MI_USER_INTERRUPT,
BCS_MMIO_SYNC_FLUSH,
BCS_CMD_STREAMER_ERR,
BCS_MI_FLUSH_DW,
BCS_PAGE_DIRECTORY_FAULT,
BCS_AS_CONTEXT_SWITCH,
VECS_MI_USER_INTERRUPT,
VECS_MI_FLUSH_DW,
VECS_AS_CONTEXT_SWITCH,
PIPE_A_FIFO_UNDERRUN,
PIPE_B_FIFO_UNDERRUN,
PIPE_A_CRC_ERR,
PIPE_B_CRC_ERR,
PIPE_A_CRC_DONE,
PIPE_B_CRC_DONE,
PIPE_A_ODD_FIELD,
PIPE_B_ODD_FIELD,
PIPE_A_EVEN_FIELD,
PIPE_B_EVEN_FIELD,
PIPE_A_LINE_COMPARE,
PIPE_B_LINE_COMPARE,
PIPE_C_LINE_COMPARE,
PIPE_A_VBLANK,
PIPE_B_VBLANK,
PIPE_C_VBLANK,
PIPE_A_VSYNC,
PIPE_B_VSYNC,
PIPE_C_VSYNC,
PRIMARY_A_FLIP_DONE,
PRIMARY_B_FLIP_DONE,
PRIMARY_C_FLIP_DONE,
SPRITE_A_FLIP_DONE,
SPRITE_B_FLIP_DONE,
SPRITE_C_FLIP_DONE,
PCU_THERMAL,
PCU_PCODE2DRIVER_MAILBOX,
DPST_PHASE_IN,
DPST_HISTOGRAM,
GSE,
DP_A_HOTPLUG,
AUX_CHANNEL_A,
PERF_COUNTER,
POISON,
GTT_FAULT,
ERROR_INTERRUPT_COMBINED,
FDI_RX_INTERRUPTS_TRANSCODER_A,
AUDIO_CP_CHANGE_TRANSCODER_A,
AUDIO_CP_REQUEST_TRANSCODER_A,
FDI_RX_INTERRUPTS_TRANSCODER_B,
AUDIO_CP_CHANGE_TRANSCODER_B,
AUDIO_CP_REQUEST_TRANSCODER_B,
FDI_RX_INTERRUPTS_TRANSCODER_C,
AUDIO_CP_CHANGE_TRANSCODER_C,
AUDIO_CP_REQUEST_TRANSCODER_C,
ERR_AND_DBG,
GMBUS,
SDVO_B_HOTPLUG,
CRT_HOTPLUG,
DP_B_HOTPLUG,
DP_C_HOTPLUG,
DP_D_HOTPLUG,
AUX_CHANNEL_B,
AUX_CHANNEL_C,
AUX_CHANNEL_D,
AUDIO_POWER_STATE_CHANGE_B,
AUDIO_POWER_STATE_CHANGE_C,
AUDIO_POWER_STATE_CHANGE_D,
INTEL_GVT_EVENT_RESERVED,
INTEL_GVT_EVENT_MAX,
};
struct intel_gvt_irq;
struct intel_gvt;
typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
struct intel_gvt_irq_ops {
void (*init_irq)(struct intel_gvt_irq *irq);
void (*check_pending_irq)(struct intel_vgpu *vgpu);
};
/* the list of physical interrupt control register groups */
enum intel_gvt_irq_type {
INTEL_GVT_IRQ_INFO_GT,
INTEL_GVT_IRQ_INFO_DPY,
INTEL_GVT_IRQ_INFO_PCH,
INTEL_GVT_IRQ_INFO_PM,
INTEL_GVT_IRQ_INFO_MASTER,
INTEL_GVT_IRQ_INFO_GT0,
INTEL_GVT_IRQ_INFO_GT1,
INTEL_GVT_IRQ_INFO_GT2,
INTEL_GVT_IRQ_INFO_GT3,
INTEL_GVT_IRQ_INFO_DE_PIPE_A,
INTEL_GVT_IRQ_INFO_DE_PIPE_B,
INTEL_GVT_IRQ_INFO_DE_PIPE_C,
INTEL_GVT_IRQ_INFO_DE_PORT,
INTEL_GVT_IRQ_INFO_DE_MISC,
INTEL_GVT_IRQ_INFO_AUD,
INTEL_GVT_IRQ_INFO_PCU,
INTEL_GVT_IRQ_INFO_MAX,
};
#define INTEL_GVT_IRQ_BITWIDTH 32
/* device specific interrupt bit definitions */
struct intel_gvt_irq_info {
char *name;
i915_reg_t reg_base;
enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
unsigned long warned;
int group;
DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
bool has_upstream_irq;
};
/* per-event information */
struct intel_gvt_event_info {
int bit; /* map to register bit */
int policy; /* forwarding policy */
struct intel_gvt_irq_info *info; /* register info */
gvt_event_virt_handler_t v_handler; /* for v_event */
};
struct intel_gvt_irq_map {
int up_irq_group;
int up_irq_bit;
int down_irq_group;
u32 down_irq_bitmask;
};
struct intel_gvt_vblank_timer {
struct hrtimer timer;
u64 period;
};
/* structure containing device specific IRQ state */
struct intel_gvt_irq {
struct intel_gvt_irq_ops *ops;
struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
struct intel_gvt_irq_map *irq_map;
struct intel_gvt_vblank_timer vblank_timer;
};
int intel_gvt_init_irq(struct intel_gvt *gvt);
void intel_gvt_clean_irq(struct intel_gvt *gvt);
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event);
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes);
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
#endif /* _GVT_INTERRUPT_H_ */

View File

@ -0,0 +1,306 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Tina Zhang <tina.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
/**
* intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
* @vgpu: a vGPU
* @gpa: guest physical address
*
* Returns:
* The MMIO offset corresponding to @gpa.
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
~GENMASK(3, 0);
return gpa - gttmmio_gpa;
}
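/*
 * Worked example (editor's illustration): if the vGPU's virtual BAR0
 * reads 0xf000000c, the MMIO base GPA is 0xf0000000 once the low flag
 * bits are masked off, so a guest access to GPA 0xf0002468 translates to
 * MMIO offset 0x2468.
 */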
#define reg_is_mmio(gvt, reg) \
(reg >= 0 && reg < gvt->device_info.mmio_size)
#define reg_is_gtt(gvt, reg) \
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
/**
* intel_vgpu_emulate_mmio_read - emulate MMIO read
* @vgpu: a vGPU
* @pa: guest physical address
* @p_data: data return buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
int ret = -EINVAL;
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes);
if (ret) {
gvt_err("vgpu%d: guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
vgpu->id, ret,
gp->gfn, pa, *(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);
return ret;
}
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
goto err;
if (WARN_ON(bytes != 4 && bytes != 8))
goto err;
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
mutex_unlock(&gvt->lock);
return ret;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) {
gvt_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n",
vgpu->id);
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
goto err;
if (WARN_ON(mmio->offset != offset))
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
} else
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (ret)
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
mutex_unlock(&gvt->lock);
return 0;
err:
gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
vgpu->id, offset, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
/**
* intel_vgpu_emulate_mmio_write - emulate MMIO write
* @vgpu: a vGPU
* @pa: guest physical address
* @p_data: write data buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
u32 old_vreg = 0, old_sreg = 0;
int ret = -EINVAL;
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
ret = gp->handler(gp, pa, p_data, bytes);
if (ret) {
gvt_err("vgpu%d: guest page write error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
vgpu->id, ret,
gp->gfn, pa, *(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);
return ret;
}
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
goto err;
if (WARN_ON(bytes != 4 && bytes != 8))
goto err;
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
mutex_unlock(&gvt->lock);
return ret;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack)
gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
if (mmio) {
u64 ro_mask = mmio->ro_mask;
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
goto err;
if (WARN_ON(mmio->offset != offset))
goto err;
}
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
old_sreg = vgpu_sreg(vgpu, offset);
}
if (!ro_mask) {
ret = mmio->write(vgpu, offset, p_data, bytes);
} else {
/* Protect RO bits like HW */
u64 data = 0;
/* all register bits are RO. */
if (ro_mask == ~(u64)0) {
gvt_err("vgpu%d: try to write RO reg %x\n",
vgpu->id, offset);
ret = 0;
goto out;
}
/* keep the RO bits in the virtual register */
memcpy(&data, p_data, bytes);
data &= ~mmio->ro_mask;
data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
ret = mmio->write(vgpu, offset, &data, bytes);
}
/* higher 16bits of mode ctl regs are mask bits for change */
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
| (vgpu_sreg(vgpu, offset) & mask);
}
} else
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
if (ret)
goto err;
out:
intel_gvt_mmio_set_accessed(gvt, offset);
mutex_unlock(&gvt->lock);
return 0;
err:
gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
vgpu->id, offset, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
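/*
 * Worked example (editor's illustration) of the RO-bit protection above:
 * with ro_mask = 0xffff0000, a current vreg of 0xaaaa0000 and a guest
 * write of 0x12345678, the value handed to mmio->write() is
 * (0x12345678 & ~ro_mask) | (0xaaaa0000 & ro_mask) = 0xaaaa5678, i.e.
 * the guest can only change the writable low 16 bits.
 */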

View File

@ -0,0 +1,105 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Tina Zhang <tina.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_MMIO_H_
#define _GVT_MMIO_H_
struct intel_gvt;
struct intel_vgpu;
#define D_SNB (1 << 0)
#define D_IVB (1 << 1)
#define D_HSW (1 << 2)
#define D_BDW (1 << 3)
#define D_SKL (1 << 4)
#define D_GEN9PLUS (D_SKL)
#define D_GEN8PLUS (D_BDW | D_SKL)
#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
#define D_SKL_PLUS (D_SKL)
#define D_BDW_PLUS (D_BDW | D_SKL)
#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
struct intel_gvt_mmio_info {
u32 offset;
u32 size;
u32 length;
u32 addr_mask;
u64 ro_mask;
u32 device;
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
u32 addr_range;
struct hlist_node node;
};
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset);
#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
typeof(reg) __reg = reg; \
u32 *offset = (u32 *)&__reg; \
*offset; \
})
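/*
 * Editor's note (illustrative): the macro above reinterprets the first
 * 32 bits of whatever it is handed as the MMIO offset, so it accepts both
 * an i915_reg_t (whose first field is the u32 register offset) and a
 * plain u32 offset; INTEL_GVT_MMIO_OFFSET(GEN8_MASTER_IRQ) and the
 * equivalent raw offset therefore yield the same value.
 */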
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
unsigned int bytes);
int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
unsigned int bytes);
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
#endif

View File

@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_MPT_H_
@ -46,4 +55,215 @@ static inline int intel_gvt_hypervisor_detect_host(void)
return intel_gvt_host.mpt->detect_host();
}
/**
* intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
* related state inside the hypervisor.
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}
/**
* intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
* related state inside the hypervisor.
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}
#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
/**
* intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
{
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
u32 addr;
int ret;
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
return 0;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
return -EINVAL;
gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
data);
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
if (ret)
return ret;
return 0;
}
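/*
 * Editor's note (illustrative): the address/data pair comes from the
 * vGPU's virtual MSI capability in its configuration space (control word
 * at +2, address at +4, data at +8 from the capability offset), so a
 * guest that has not set the MSI enable bit simply gets no interrupt
 * injected, mirroring real hardware behaviour.
 */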
/**
* intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
* @p: host kernel virtual address
*
* Returns:
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
*/
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
/**
* intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
* @vgpu: a vGPU
* @p: intel_vgpu_guest_page
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
{
int ret;
if (p->writeprotection)
return 0;
ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
if (ret)
return ret;
p->writeprotection = true;
atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
return 0;
}
/**
* intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
* guest page
* @vgpu: a vGPU
* @p: intel_vgpu_guest_page
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
{
int ret;
if (!p->writeprotection)
return 0;
ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
if (ret)
return ret;
p->writeprotection = false;
atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
return 0;
}
/**
* intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}
/**
* intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}
/**
* intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
* @vgpu: a vGPU
* @gpfn: guest pfn
*
* Returns:
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
*/
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
enum {
GVT_MAP_APERTURE = 0,
GVT_MAP_OPREGION,
};
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
* @mfn: host PFN
* @nr: number of PFNs
* @map: map or unmap
* @type: map type
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long mfn, unsigned int nr,
bool map, int type)
{
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
map, type);
}
/**
* intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
* @vgpu: a vGPU
* @start: the beginning of the guest physical address region
* @end: the end of the guest physical address region
* @map: map or unmap
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_trap_area(
struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
#endif /* _GVT_MPT_H_ */

View File

@ -0,0 +1,344 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/acpi.h>
#include "i915_drv.h"
#include "gvt.h"
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
u8 *buf;
int i;
if (WARN((vgpu_opregion(vgpu)->va),
"vgpu%d: opregion has been initialized already.\n",
vgpu->id))
return -EINVAL;
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
GFP_DMA32 | __GFP_ZERO,
INTEL_GVT_OPREGION_PORDER);
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
INTEL_GVT_OPREGION_SIZE);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
/* For an unknown reason, the value in the LID field is incorrect,
* which blocks the Windows guest, so work around it by forcing
* it to "OPEN"
*/
buf = (u8 *)vgpu_opregion(vgpu)->va;
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
return 0;
}
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
u64 mfn;
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n");
return -EINVAL;
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map, GVT_MAP_OPREGION);
if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
return ret;
}
}
return 0;
}
/**
* intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
* @vgpu: a vGPU
*
*/
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
int i;
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
if (!vgpu_opregion(vgpu)->va)
return;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
vunmap(vgpu_opregion(vgpu)->va);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
if (vgpu_opregion(vgpu)->pages[i]) {
put_page(vgpu_opregion(vgpu)->pages[i]);
vgpu_opregion(vgpu)->pages[i] = NULL;
}
}
} else {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
INTEL_GVT_OPREGION_PORDER);
}
vgpu_opregion(vgpu)->va = NULL;
}
/**
* intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
* @vgpu: a vGPU
* @gpa: guest physical address of opregion
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
int ret;
gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
gvt_dbg_core("emulate opregion from kernel\n");
ret = init_vgpu_opregion(vgpu, gpa);
if (ret)
return ret;
ret = map_vgpu_opregion(vgpu, true);
if (ret)
return ret;
} else {
gvt_dbg_core("emulate opregion from userspace\n");
/*
* If opregion pages are not allocated from the host kernel,
* most of the params are meaningless
*/
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
0, /* not used */
0, /* not used */
2, /* not used */
1,
GVT_MAP_OPREGION);
if (ret)
return ret;
}
return 0;
}
/**
* intel_gvt_clean_opregion - clean up host opregion related state
* @gvt: a GVT device
*
*/
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
iounmap(gvt->opregion.opregion_va);
gvt->opregion.opregion_va = NULL;
}
/**
* intel_gvt_init_opregion - initialize host opregion related state
* @gvt: a GVT device
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_init_opregion(struct intel_gvt *gvt)
{
gvt_dbg_core("init host opregion\n");
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
&gvt->opregion.opregion_pa);
gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
INTEL_GVT_OPREGION_SIZE);
if (!gvt->opregion.opregion_va) {
gvt_err("fail to map host opregion\n");
return -EFAULT;
}
return 0;
}
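/* Extract the function / subfunction fields from the OpRegion SCIC register */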
#define GVT_OPREGION_FUNC(scic) \
({ \
u32 __ret; \
__ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
OPREGION_SCIC_FUNC_SHIFT; \
__ret; \
})
#define GVT_OPREGION_SUBFUNC(scic) \
({ \
u32 __ret; \
__ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
OPREGION_SCIC_SUBFUNC_SHIFT; \
__ret; \
})
static const char *opregion_func_name(u32 func)
{
const char *name = NULL;
switch (func) {
case 0 ... 3:
case 5:
case 7 ... 15:
name = "Reserved";
break;
case 4:
name = "Get BIOS Data";
break;
case 6:
name = "System BIOS Callbacks";
break;
default:
name = "Unknown";
break;
}
return name;
}
static const char *opregion_subfunc_name(u32 subfunc)
{
const char *name = NULL;
switch (subfunc) {
case 0:
name = "Supported Calls";
break;
case 1:
name = "Requested Callbacks";
break;
case 2 ... 3:
case 8 ... 9:
name = "Reserved";
break;
case 5:
name = "Boot Display";
break;
case 6:
name = "TV-Standard/Video-Connector";
break;
case 7:
name = "Internal Graphics";
break;
case 10:
name = "Spread Spectrum Clocks";
break;
case 11:
name = "Get AKSV";
break;
default:
name = "Unknown";
break;
}
return name;
}
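/* Only capability queries (supported calls / requested callbacks) are
 * emulated; other SCI runtime services are reported as unsupported.
 */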
static bool querying_capabilities(u32 scic)
{
u32 func, subfunc;
func = GVT_OPREGION_FUNC(scic);
subfunc = GVT_OPREGION_SUBFUNC(scic);
if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
return true;
}
return false;
}
/**
* intel_vgpu_emulate_opregion_request - emulating OpRegion request
* @vgpu: a vGPU
* @swsci: SWSCI request
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
u32 *scic, *parm;
u32 func, subfunc;
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
return 0;
}
/* ignore non 0->1 transitions */
if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
& SWSCI_SCI_TRIGGER) ||
!(swsci & SWSCI_SCI_TRIGGER)) {
return 0;
}
func = GVT_OPREGION_FUNC(*scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic);
if (!querying_capabilities(*scic)) {
gvt_err("vgpu%d: requesting runtime service: func \"%s\","
" subfunc \"%s\"\n",
vgpu->id,
opregion_func_name(func),
opregion_subfunc_name(subfunc));
/*
* emulate exit status of function call, '0' means
* "failure, generic, unsupported or unknown cause"
*/
*scic &= ~OPREGION_SCIC_EXIT_MASK;
return 0;
}
*scic = 0;
*parm = 0;
return 0;
}


@ -0,0 +1,80 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _GVT_REG_H
#define _GVT_REG_H
#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
#define BDW_GMCH_GMS_SHIFT 8
#define BDW_GMCH_GMS_MASK 0xff
#define INTEL_GVT_PCI_SWSCI 0xe8
#define SWSCI_SCI_SELECT (1 << 15)
#define SWSCI_SCI_TRIGGER 1
#define INTEL_GVT_PCI_OPREGION 0xfc
#define INTEL_GVT_OPREGION_CLID 0x1AC
#define INTEL_GVT_OPREGION_SCIC 0x200
#define OPREGION_SCIC_FUNC_MASK 0x1E
#define OPREGION_SCIC_FUNC_SHIFT 1
#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
#define OPREGION_SCIC_SUBFUNC_SHIFT 8
#define OPREGION_SCIC_EXIT_MASK 0xE0
#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
#define INTEL_GVT_OPREGION_PARM 0x204
#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_PORDER 1
#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
#define _REG_VECS_EXCC 0x1A028
#define _REG_VCS2_EXCC 0x1c028
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
#define FORCEWAKE_ACK_HSW_REG 0x130044
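/* Field masks for the ring buffer HEAD/TAIL/CTL registers */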
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
#endif


@ -0,0 +1,291 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
struct render_mmio {
int ring_id;
i915_reg_t reg;
u32 mask;
bool in_context;
u32 value;
};
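/* Render MMIO registers whose host/guest values are swapped when a vGPU
 * workload is scheduled in and swapped back when it is scheduled out.
 */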
static struct render_mmio gen8_render_mmio_list[] = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
{RCS, _MMIO(0x20c0), 0xffff, true},
{RCS, _MMIO(0x24d0), 0, false},
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
{RCS, _MMIO(0x7010), 0xffff, true},
{RCS, _MMIO(0x7300), 0xffff, true},
{RCS, _MMIO(0x83a4), 0xffff, true},
{BCS, _MMIO(0x2229c), 0xffff, false},
{BCS, _MMIO(0x2209c), 0xffff, false},
{BCS, _MMIO(0x220c0), 0xffff, false},
{BCS, _MMIO(0x22098), 0x0, false},
{BCS, _MMIO(0x22028), 0x0, false},
};
static struct render_mmio gen9_render_mmio_list[] = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
{RCS, _MMIO(0x20c0), 0xffff, true},
{RCS, _MMIO(0x24d0), 0, false},
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
{RCS, _MMIO(0x7010), 0xffff, true},
{RCS, _MMIO(0x7300), 0xffff, true},
{RCS, _MMIO(0x83a4), 0xffff, true},
{RCS, _MMIO(0x40e0), 0, false},
{RCS, _MMIO(0x40e4), 0, false},
{RCS, _MMIO(0x2580), 0xffff, true},
{RCS, _MMIO(0x7014), 0xffff, true},
{RCS, _MMIO(0x20ec), 0xffff, false},
{RCS, _MMIO(0xb118), 0, false},
{RCS, _MMIO(0xe100), 0xffff, true},
{RCS, _MMIO(0xe180), 0xffff, true},
{RCS, _MMIO(0xe184), 0xffff, true},
{RCS, _MMIO(0xe188), 0xffff, true},
{RCS, _MMIO(0xe194), 0xffff, true},
{RCS, _MMIO(0x4de0), 0, false},
{RCS, _MMIO(0x4de4), 0, false},
{RCS, _MMIO(0x4de8), 0, false},
{RCS, _MMIO(0x4dec), 0, false},
{RCS, _MMIO(0x4df0), 0, false},
{RCS, _MMIO(0x4df4), 0, false},
{BCS, _MMIO(0x2229c), 0xffff, false},
{BCS, _MMIO(0x2209c), 0xffff, false},
{BCS, _MMIO(0x220c0), 0xffff, false},
{BCS, _MMIO(0x22098), 0x0, false},
{BCS, _MMIO(0x22028), 0x0, false},
{VCS2, _MMIO(0x1c028), 0xffff, false},
{VECS, _MMIO(0x1a028), 0xffff, false},
};
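/* Save area for the host MOCS settings while a vGPU's MOCS values are loaded */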
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
static u32 gen9_render_mocs_L3[32];
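/* If the vGPU flagged a pending TLB flush for this ring, write the per-engine
 * invalidate register and wait for the hardware to clear it.
 */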
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t reg;
u32 regs[] = {
[RCS] = 0x4260,
[VCS] = 0x4264,
[VCS2] = 0x4268,
[BCS] = 0x426c,
[VECS] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
return;
reg = _MMIO(regs[ring_id]);
I915_WRITE(reg, 0x1);
if (wait_for_atomic((I915_READ(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t offset, l3_offset;
u32 regs[] = {
[RCS] = 0xc800,
[VCS] = 0xc900,
[VCS2] = 0xca00,
[BCS] = 0xcc00,
[VECS] = 0xcb00,
};
int i;
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!IS_SKYLAKE(dev_priv))
return;
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
POSTING_READ(offset);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
gen9_render_mocs_L3[i] = I915_READ(l3_offset);
I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
POSTING_READ(l3_offset);
l3_offset.reg += 4;
}
}
}
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t offset, l3_offset;
u32 regs[] = {
[RCS] = 0xc800,
[VCS] = 0xc900,
[VCS2] = 0xca00,
[BCS] = 0xcc00,
[VECS] = 0xcb00,
};
int i;
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!IS_SKYLAKE(dev_priv))
return;
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
POSTING_READ(offset);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
POSTING_READ(l3_offset);
l3_offset.reg += 4;
}
}
}
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
load_mocs(vgpu, ring_id);
} else {
mmio = gen8_render_mmio_list;
array_size = ARRAY_SIZE(gen8_render_mmio_list);
}
for (i = 0; i < array_size; i++, mmio++) {
if (mmio->ring_id != ring_id)
continue;
mmio->value = I915_READ(mmio->reg);
if (mmio->mask)
v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
else
v = vgpu_vreg(vgpu, mmio->reg);
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
gvt_dbg_render("load reg %x old %x new %x\n",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
handle_tlb_pending_event(vgpu, ring_id);
}
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
if (IS_SKYLAKE(dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
restore_mocs(vgpu, ring_id);
} else {
mmio = gen8_render_mmio_list;
array_size = ARRAY_SIZE(gen8_render_mmio_list);
}
for (i = 0; i < array_size; i++, mmio++) {
if (mmio->ring_id != ring_id)
continue;
vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
if (mmio->mask) {
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
v = mmio->value | (mmio->mask << 16);
} else
v = mmio->value;
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
gvt_dbg_render("restore reg %x old %x new %x\n",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
}


@ -0,0 +1,43 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
#endif


@ -0,0 +1,294 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Anhua Xu
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
struct intel_vgpu_execlist *execlist;
enum intel_engine_id i;
struct intel_engine_cs *engine;
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
execlist = &vgpu->execlist[i];
if (!list_empty(workload_q_head(vgpu, i)))
return true;
}
return false;
}
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id i;
struct intel_engine_cs *engine;
/* no target to schedule */
if (!scheduler->next_vgpu)
return;
gvt_dbg_sched("try to schedule next vgpu %d\n",
scheduler->next_vgpu->id);
/*
* after the flag is set, the workload dispatch thread will
* stop dispatching workloads for the current vgpu
*/
scheduler->need_reschedule = true;
/* still have uncompleted workload? */
for_each_engine(engine, gvt->dev_priv, i) {
if (scheduler->current_workload[i]) {
gvt_dbg_sched("still have running workload\n");
return;
}
}
gvt_dbg_sched("switch to next vgpu %d\n",
scheduler->next_vgpu->id);
/* switch current vgpu */
scheduler->current_vgpu = scheduler->next_vgpu;
scheduler->next_vgpu = NULL;
scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
for_each_engine(engine, gvt->dev_priv, i)
wake_up(&scheduler->waitq[i]);
}
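/* Simple time-based scheduler (tbs): round-robin between vGPUs that have
 * pending workloads, switching after a fixed time slice.
 */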
struct tbs_vgpu_data {
struct list_head list;
struct intel_vgpu *vgpu;
/* put some per-vgpu sched stats here */
};
struct tbs_sched_data {
struct intel_gvt *gvt;
struct delayed_work work;
unsigned long period;
struct list_head runq_head;
};
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
static void tbs_sched_func(struct work_struct *work)
{
struct tbs_sched_data *sched_data = container_of(work,
struct tbs_sched_data, work.work);
struct tbs_vgpu_data *vgpu_data;
struct intel_gvt *gvt = sched_data->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu *vgpu = NULL;
struct list_head *pos, *head;
mutex_lock(&gvt->lock);
/* no vgpu on the runqueue, or a target has already been chosen */
if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
goto out;
if (scheduler->current_vgpu) {
vgpu_data = scheduler->current_vgpu->sched_data;
head = &vgpu_data->list;
} else {
gvt_dbg_sched("no current vgpu search from q head\n");
head = &sched_data->runq_head;
}
/* search a vgpu with pending workload */
list_for_each(pos, head) {
if (pos == &sched_data->runq_head)
continue;
vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
vgpu = vgpu_data->vgpu;
break;
}
if (vgpu) {
scheduler->next_vgpu = vgpu;
gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
}
out:
if (scheduler->next_vgpu) {
gvt_dbg_sched("try to schedule next vgpu %d\n",
scheduler->next_vgpu->id);
try_to_schedule_next_vgpu(gvt);
}
/*
* still have a vgpu on the runqueue, or the last schedule
* hasn't finished due to a running workload
*/
if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
schedule_delayed_work(&sched_data->work, sched_data->period);
mutex_unlock(&gvt->lock);
}
static int tbs_sched_init(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
struct tbs_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
INIT_LIST_HEAD(&data->runq_head);
INIT_DELAYED_WORK(&data->work, tbs_sched_func);
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
scheduler->sched_data = data;
return 0;
}
static void tbs_sched_clean(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
struct tbs_sched_data *data = scheduler->sched_data;
cancel_delayed_work(&data->work);
kfree(data);
scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
struct tbs_vgpu_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->vgpu = vgpu;
INIT_LIST_HEAD(&data->list);
vgpu->sched_data = data;
return 0;
}
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
if (!list_empty(&vgpu_data->list))
return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head);
schedule_delayed_work(&sched_data->work, sched_data->period);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
list_del_init(&vgpu_data->list);
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
.init = tbs_sched_init,
.clean = tbs_sched_clean,
.init_vgpu = tbs_sched_init_vgpu,
.clean_vgpu = tbs_sched_clean_vgpu,
.start_schedule = tbs_sched_start_schedule,
.stop_schedule = tbs_sched_stop_schedule,
};
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
gvt->scheduler.sched_ops = &tbs_schedule_ops;
return gvt->scheduler.sched_ops->init(gvt);
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
gvt->scheduler.sched_ops->clean(gvt);
}
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
scheduler->next_vgpu = NULL;
if (scheduler->current_vgpu == vgpu) {
/* stop workload dispatching */
scheduler->need_reschedule = true;
scheduler->current_vgpu = NULL;
}
}


@ -0,0 +1,58 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Anhua Xu
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef __GVT_SCHED_POLICY__
#define __GVT_SCHED_POLICY__
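/* Hooks that a vGPU scheduling policy implementation must provide */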
struct intel_gvt_sched_policy_ops {
int (*init)(struct intel_gvt *gvt);
void (*clean)(struct intel_gvt *gvt);
int (*init_vgpu)(struct intel_vgpu *vgpu);
void (*clean_vgpu)(struct intel_vgpu *vgpu);
void (*start_schedule)(struct intel_vgpu *vgpu);
void (*stop_schedule)(struct intel_vgpu *vgpu);
};
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
#endif


@ -0,0 +1,578 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Chanbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#include <linux/kthread.h>
#include "i915_drv.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
static void set_context_pdp_root_pointer(
struct execlist_ring_context *ring_context,
u32 pdp[8])
{
struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
pdp_pair[i].val = pdp[7 - i];
}
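/* Copy the guest's ring context pages from guest memory into the shadow
 * context object before the workload is submitted to i915.
 */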
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
unsigned long context_gpa, context_page_num;
int i;
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
context_page_num = 19;
i = 2;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("Invalid guest context descriptor\n");
return -EINVAL;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
dst = kmap_atomic(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
kunmap_atomic(dst);
i++;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
COPY_REG(ctx_timestamp);
if (ring_id == RCS) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
}
#undef COPY_REG
set_context_pdp_root_pointer(shadow_ring_context,
workload->shadow_mm->shadow_page_table);
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap_atomic(shadow_ring_context);
return 0;
}
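/* i915 context status notifier: load/restore the render MMIO state when the
 * shadow context is scheduled in or out on the hardware.
 */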
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_vgpu *vgpu = container_of(nb,
struct intel_vgpu, shadow_ctx_notifier_block);
struct drm_i915_gem_request *req =
(struct drm_i915_gem_request *)data;
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
workload->ring_id);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
WARN_ON(1);
return NOTIFY_OK;
}
wake_up(&workload->shadow_ctx_status_wq);
return NOTIFY_OK;
}
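/* Scan and shadow the guest workload, then submit it to i915 as a real GEM
 * request on the target ring.
 */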
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct drm_i915_gem_request *rq;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_err("fail to allocate gem request\n");
workload->status = PTR_ERR(rq);
return workload->status;
}
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
workload->req = i915_gem_request_get(rq);
mutex_lock(&gvt->lock);
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto err;
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err;
ret = populate_shadow_context(workload);
if (ret)
goto err;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto err;
}
mutex_unlock(&gvt->lock);
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_add_request_no_flush(rq);
workload->dispatched = true;
return 0;
err:
workload->status = ret;
mutex_unlock(&gvt->lock);
i915_add_request_no_flush(rq);
return ret;
}
static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
mutex_lock(&gvt->lock);
/*
* no current vgpu / will be scheduled out / no workload
* bail out
*/
if (!scheduler->current_vgpu) {
gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
goto out;
}
if (scheduler->need_reschedule) {
gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
goto out;
}
if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
gvt_dbg_sched("ring id %d stop - no available workload\n",
ring_id);
goto out;
}
/*
* still have a current workload; maybe the workload dispatcher
* failed to submit it for some reason, so resubmit it.
*/
if (scheduler->current_workload[ring_id]) {
workload = scheduler->current_workload[ring_id];
gvt_dbg_sched("ring id %d still have current workload %p\n",
ring_id, workload);
goto out;
}
/*
* pick a workload as the current workload.
* once the current workload is set, the schedule policy routines
* will wait until the current workload is finished before
* scheduling out a vgpu.
*/
scheduler->current_workload[ring_id] = container_of(
workload_q_head(scheduler->current_vgpu, ring_id)->next,
struct intel_vgpu_workload, list);
workload = scheduler->current_workload[ring_id];
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
atomic_inc(&workload->vgpu->running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
}
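/* Write the shadow context contents back to the guest's ring context pages
 * after the workload has completed.
 */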
static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
context_page_num = 19;
i = 2;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context descriptor\n");
return;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
src = kmap_atomic(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
kunmap_atomic(src);
i++;
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
COPY_REG(ctx_timestamp);
#undef COPY_REG
intel_gvt_hypervisor_write_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap_atomic(shadow_ring_context);
}
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
if (!workload->status && !workload->vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(workload->vgpu,
event);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
ring_id, workload, workload->status);
scheduler->current_workload[ring_id] = NULL;
atomic_dec(&workload->vgpu->running_workload_num);
list_del_init(&workload->list);
workload->complete(workload);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
struct workload_thread_param {
struct intel_gvt *gvt;
int ring_id;
};
static DEFINE_MUTEX(scheduler_mutex);
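/* Per-engine kernel thread: picks the next workload of the current vGPU,
 * dispatches it to i915 and waits for its completion.
 */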
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
struct intel_gvt *gvt = p->gvt;
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
ret = wait_event_interruptible(scheduler->waitq[ring_id],
kthread_should_stop() ||
(workload = pick_next_workload(gvt, ring_id)));
WARN_ON_ONCE(ret);
if (kthread_should_stop())
break;
mutex_lock(&scheduler_mutex);
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
intel_runtime_pm_get(gvt->dev_priv);
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
if (need_force_wake)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
mutex_lock(&gvt->dev_priv->drm.struct_mutex);
ret = dispatch_workload(workload);
mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
if (ret) {
gvt_err("fail to dispatch workload, skip\n");
goto complete;
}
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
workload->status = i915_wait_request(workload->req,
0, NULL, NULL);
if (workload->status != 0)
gvt_err("fail to wait workload, skip\n");
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
mutex_lock(&gvt->dev_priv->drm.struct_mutex);
complete_current_workload(gvt, ring_id);
mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
i915_gem_request_put(fetch_and_zero(&workload->req));
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
mutex_unlock(&scheduler_mutex);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
if (atomic_read(&vgpu->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
!atomic_read(&vgpu->running_workload_num));
}
}
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int i;
gvt_dbg_core("clean workload scheduler\n");
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (scheduler->thread[i]) {
kthread_stop(scheduler->thread[i]);
scheduler->thread[i] = NULL;
}
}
}
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
int ret;
int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
for (i = 0; i < I915_NUM_ENGINES; i++) {
/* check ring mask at init time */
if (!HAS_ENGINE(gvt->dev_priv, i))
continue;
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param) {
ret = -ENOMEM;
goto err;
}
param->gvt = gvt;
param->ring_id = i;
scheduler->thread[i] = kthread_run(workload_thread, param,
"gvt workload %d", i);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
}
return 0;
err:
intel_gvt_clean_workload_scheduler(gvt);
kfree(param);
param = NULL;
return ret;
}
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
mutex_lock(&dev_priv->drm.struct_mutex);
/* a little hacky to mark as ctx closed */
vgpu->shadow_ctx->closed = true;
i915_gem_context_put(vgpu->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
atomic_set(&vgpu->running_workload_num, 0);
vgpu->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
vgpu->shadow_ctx->engine[RCS].initialised = true;
vgpu->shadow_ctx_notifier_block.notifier_call =
shadow_context_status_change;
atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
return 0;
}


@ -0,0 +1,139 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Chanbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
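/* Global workload scheduler state: the vGPU currently owning the engines,
 * the vGPU scheduled to run next, and the per-engine dispatch threads with
 * their wait queues.
 */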
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
bool need_reschedule;
wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_ENGINES];
wait_queue_head_t waitq[I915_NUM_ENGINES];
void *sched_data;
struct intel_gvt_sched_policy_ops *sched_ops;
};
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
struct drm_i915_gem_object *obj;
unsigned long guest_gma;
unsigned long shadow_gma;
void *shadow_va;
uint32_t size;
};
#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
};
struct intel_shadow_wa_ctx {
struct intel_vgpu_workload *workload;
struct shadow_indirect_ctx indirect_ctx;
struct shadow_per_ctx per_ctx;
};
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
struct drm_i915_gem_request *req;
/* has this workload been dispatched to i915? */
bool dispatched;
int status;
struct intel_vgpu_mm *shadow_mm;
/* different submission models may need different handlers */
int (*prepare)(struct intel_vgpu_workload *);
int (*complete)(struct intel_vgpu_workload *);
struct list_head list;
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
void *shadow_ring_buffer_va;
/* execlist context information */
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;
wait_queue_head_t shadow_ctx_status_wq;
u64 ring_context_gpa;
/* shadow batch buffer */
struct list_head shadow_bb;
struct intel_shadow_wa_ctx wa_ctx;
};
/* An Intel shadow batch buffer is an i915 gem object */
struct intel_shadow_bb_entry {
struct list_head list;
struct drm_i915_gem_object *obj;
void *va;
unsigned long len;
void *bb_start_cmd_va;
};
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->workload_q_head[ring_id]))
#define queue_workload(workload) do { \
list_add_tail(&workload->list, \
workload_q_head(workload->vgpu, workload->ring_id)); \
wake_up(&workload->vgpu->gvt-> \
scheduler.waitq[workload->ring_id]); \
} while (0)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
#endif


@ -0,0 +1,286 @@
/*
* Copyright © 2011-2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GVT_TRACE_H_
#include <linux/types.h>
#include <linux/stringify.h>
#include <linux/tracepoint.h>
#include <asm/tsc.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gvt
TRACE_EVENT(spt_alloc,
TP_PROTO(int id, void *spt, int type, unsigned long mfn,
unsigned long gpt_gfn),
TP_ARGS(id, spt, type, mfn, gpt_gfn),
TP_STRUCT__entry(
__field(int, id)
__field(void *, spt)
__field(int, type)
__field(unsigned long, mfn)
__field(unsigned long, gpt_gfn)
),
TP_fast_assign(
__entry->id = id;
__entry->spt = spt;
__entry->type = type;
__entry->mfn = mfn;
__entry->gpt_gfn = gpt_gfn;
),
TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
__entry->id,
__entry->spt,
__entry->type,
__entry->mfn,
__entry->gpt_gfn)
);
TRACE_EVENT(spt_free,
TP_PROTO(int id, void *spt, int type),
TP_ARGS(id, spt, type),
TP_STRUCT__entry(
__field(int, id)
__field(void *, spt)
__field(int, type)
),
TP_fast_assign(
__entry->id = id;
__entry->spt = spt;
__entry->type = type;
),
TP_printk("VM%u [free] spt %p type %d\n",
__entry->id,
__entry->spt,
__entry->type)
);
#define MAX_BUF_LEN 256
TRACE_EVENT(gma_index,
TP_PROTO(const char *prefix, unsigned long gma,
unsigned long index),
TP_ARGS(prefix, gma, index),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(gma_translate,
TP_PROTO(int id, char *type, int ring_id, int pt_level,
unsigned long gma, unsigned long gpa),
TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
id, type, ring_id, pt_level, gma, gpa);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(spt_refcount,
TP_PROTO(int id, char *action, void *spt, int before, int after),
TP_ARGS(id, action, spt, before, after),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [%s] spt %p before %d -> after %d\n",
id, action, spt, before, after);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(spt_change,
TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
int type),
TP_ARGS(id, action, spt, gfn, type),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [%s] spt %p gfn 0x%lx type %d\n",
id, action, spt, gfn, type);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(gpt_change,
TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
unsigned long index),
TP_ARGS(id, tag, spt, type, v, index),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
id, tag, spt, type, v, index);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(oos_change,
TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
TP_ARGS(id, tag, page_id, gpt, type),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [oos %s] page id %d gpt %p type %d\n",
id, tag, page_id, gpt, type);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(oos_sync,
TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
unsigned long index),
TP_ARGS(id, page_id, gpt, type, v, index),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
id, page_id, gpt, type, v, index);
),
TP_printk("%s", __entry->buf)
);
#define MAX_CMD_STR_LEN 256
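/* Trace a guest command parsed by the command scanner: origin (ring buffer or
 * batch buffer), handler cost and a hex dump of the command dwords.
 */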
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
TP_STRUCT__entry(
__field(u8, vm_id)
__field(u8, ring_id)
__field(int, i)
__array(char, tmp_buf, MAX_CMD_STR_LEN)
__array(char, cmd_str, MAX_CMD_STR_LEN)
),
TP_fast_assign(
__entry->vm_id = vm_id;
__entry->ring_id = ring_id;
__entry->cmd_str[0] = '\0';
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
strcat(__entry->cmd_str, __entry->tmp_buf);
entry->i = 0;
while (cmd_len > 0) {
if (cmd_len >= 8) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
__entry->i += 8;
cmd_len -= 8;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 4) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
__entry->i += 4;
cmd_len -= 4;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 2) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
__entry->i += 2;
cmd_len -= 2;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len == 1) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
__entry->i += 1;
cmd_len -= 1;
strcat(__entry->cmd_str, __entry->tmp_buf);
}
}
strcat(__entry->cmd_str, "\n");
),
TP_printk("%s", __entry->cmd_str)
);
#endif /* _GVT_TRACE_H_ */
/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>


@ -0,0 +1,36 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "trace.h"
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif


@ -0,0 +1,274 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
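/* Allocate the virtual (vreg) and shadow (sreg) MMIO spaces and seed both
 * with the host MMIO snapshot held in gvt->firmware.mmio.
 */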
static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set bits 0:2 (Core C-State) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
return 0;
}
static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
int i;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
if (!param->primary) {
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
}
/* Show the guest that there isn't any stolen memory. */
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
gvt_aperture_pa_base(gvt), true);
vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER);
/*
* Clear the upper 32 bits of the BARs and let the guest assign new values
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
gvt->dev_priv->drm.pdev, i * 2);
vgpu->cfg_space.bar[i].tracked = false;
}
}
static void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
vgpu_aperture_sz(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
vgpu_hidden_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
vgpu_hidden_sz(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
*
* This function is called when user wants to destroy a virtual GPU.
*
*/
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&gvt->lock);
vgpu->active = false;
idr_remove(&gvt->vgpu_idr, vgpu->id);
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
clean_vgpu_mmio(vgpu);
vfree(vgpu);
mutex_unlock(&gvt->lock);
}
/**
* intel_gvt_create_vgpu - create a virtual GPU
* @gvt: GVT device
* @param: vGPU creation parameters
*
* This function is called when a user wants to create a virtual GPU.
*
* Returns:
* Pointer to intel_vgpu on success, or an error pointer on failure.
*/
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
struct intel_vgpu *vgpu;
int ret;
gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
param->handle, param->low_gm_sz, param->high_gm_sz,
param->fence_sz);
vgpu = vzalloc(sizeof(*vgpu));
if (!vgpu)
return ERR_PTR(-ENOMEM);
mutex_lock(&gvt->lock);
ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
if (ret < 0)
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
setup_vgpu_cfg_space(vgpu, param);
ret = setup_vgpu_mmio(vgpu);
if (ret)
goto out_free_vgpu;
ret = intel_vgpu_alloc_resource(vgpu, param);
if (ret)
goto out_clean_vgpu_mmio;
populate_pvinfo_page(vgpu);
ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
if (ret)
goto out_clean_vgpu_resource;
ret = intel_vgpu_init_gtt(vgpu);
if (ret)
goto out_detach_hypervisor_vgpu;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
ret = intel_vgpu_init_opregion(vgpu, 0);
if (ret)
goto out_clean_gtt;
}
ret = intel_vgpu_init_display(vgpu);
if (ret)
goto out_clean_opregion;
ret = intel_vgpu_init_execlist(vgpu);
if (ret)
goto out_clean_display;
ret = intel_vgpu_init_gvt_context(vgpu);
if (ret)
goto out_clean_execlist;
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
goto out_clean_shadow_ctx;
vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
out_clean_shadow_ctx:
intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
out_clean_opregion:
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
clean_vgpu_mmio(vgpu);
out_free_vgpu:
vfree(vgpu);
mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
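A hedged usage sketch of the two entry points above, as a hypervisor backend might call them; the sizes are made-up illustration values (in MB and fence registers, matching the debug message in the create path):

/* Hypothetical caller, not part of this patch. */
static int sketch_vgpu_lifecycle(struct intel_gvt *gvt)
{
	struct intel_vgpu_creation_params param = {
		.handle = 1,
		.primary = 1,
		.low_gm_sz = 64,	/* mappable GM, MB */
		.high_gm_sz = 384,	/* hidden GM, MB */
		.fence_sz = 4,
	};
	struct intel_vgpu *vgpu;

	vgpu = intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return PTR_ERR(vgpu);

	intel_gvt_destroy_vgpu(vgpu);
	return 0;
}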


@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;


@ -79,10 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
return 0;
}
@ -109,7 +107,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
return obj->fault_mappable ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@ -152,7 +150,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
for_each_engine_id(engine, dev_priv, id)
for_each_engine(engine, dev_priv, id)
seq_printf(m, "%x ",
i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
@ -188,15 +186,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (obj->pin_display || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_display)
*t++ = 'p';
if (obj->fault_mappable)
*t++ = 'f';
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
engine = i915_gem_active_get_engine(&obj->last_write,
&dev_priv->drm.struct_mutex);
@ -334,11 +323,12 @@ static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int j;
memset(&stats, 0, sizeof(stats));
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
@ -402,7 +392,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
seq_printf(m, "%u objects, %zu bytes\n",
seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
@ -607,6 +597,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int total = 0;
int ret, j;
@ -614,7 +605,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
if (ret)
return ret;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
@ -645,12 +636,30 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
static void print_request(struct seq_file *m,
struct drm_i915_gem_request *rq,
const char *prefix)
{
struct pid *pid = rq->ctx->pid;
struct task_struct *task;
rcu_read_lock();
task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
seq_printf(m, "%s%x [%x:%x] @ %d: %s [%d]\n", prefix,
rq->fence.seqno, rq->ctx->hw_id, rq->fence.seqno,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
task ? task->comm : "<unknown>",
task ? task->pid : -1);
rcu_read_unlock();
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@ -658,7 +667,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
return ret;
any = 0;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
@ -668,19 +677,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
list_for_each_entry(req, &engine->request_list, link) {
struct pid *pid = req->ctx->pid;
struct task_struct *task;
rcu_read_lock();
task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
seq_printf(m, " %x @ %d: %s [%d]\n",
req->fence.seqno,
(int) (jiffies - req->emitted_jiffies),
task ? task->comm : "<unknown>",
task ? task->pid : -1);
rcu_read_unlock();
}
list_for_each_entry(req, &engine->request_list, link)
print_request(m, req, " ");
any++;
}
@ -715,8 +713,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
i915_ring_seqno_info(m, engine);
return 0;
@ -727,6 +726,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int i, pipe;
intel_runtime_pm_get(dev_priv);
@ -895,7 +895,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
@ -943,7 +943,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
const u32 *hws;
int i;
engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
engine = dev_priv->engine[(uintptr_t)node->info_ent->data];
hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
@ -956,6 +956,8 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
@ -1038,6 +1040,8 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
#endif
static int
i915_next_seqno_get(void *data, u64 *val)
{
@ -1277,15 +1281,42 @@ out:
return ret;
}
static void i915_instdone_info(struct drm_i915_private *dev_priv,
struct seq_file *m,
struct intel_instdone *instdone)
{
int slice;
int subslice;
seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
instdone->instdone);
if (INTEL_GEN(dev_priv) <= 3)
return;
seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
instdone->slice_common);
if (INTEL_GEN(dev_priv) <= 6)
return;
for_each_instdone_slice_subslice(dev_priv, slice, subslice)
seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice, instdone->sampler[slice][subslice]);
for_each_instdone_slice_subslice(dev_priv, slice, subslice)
seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice, instdone->row[slice][subslice]);
}
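The per-slice/subslice dump above implies a struct intel_instdone layout roughly like the sketch below; the array bound names are placeholders, and the authoritative definition lives in the engine headers:

/* Layout inferred from the accesses above; bounds are assumptions. */
struct intel_instdone_sketch {
	u32 instdone;				/* whole-engine INSTDONE */
	u32 slice_common;			/* SC_INSTDONE, gen4+ */
	u32 sampler[MAX_SLICES][MAX_SUBSLICES];	/* gen7+ */
	u32 row[MAX_SLICES][MAX_SUBSLICES];	/* gen7+ */
};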
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
u32 instdone[I915_NUM_INSTDONE_REG];
struct intel_instdone instdone;
enum intel_engine_id id;
int j;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_printf(m, "Wedged\n");
@ -1303,12 +1334,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
for_each_engine_id(engine, dev_priv, id) {
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
i915_get_extra_instdone(dev_priv, instdone);
intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
@ -1319,7 +1350,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
for_each_engine_id(engine, dev_priv, id) {
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *rb;
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno,
@ -1329,6 +1363,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
spin_lock(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock(&b->lock);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@ -1336,18 +1379,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =");
seq_puts(m, "\tinstdone read =\n");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x", instdone[j]);
i915_instdone_info(dev_priv, m, &instdone);
seq_puts(m, "\n\tinstdone accu =");
seq_puts(m, "\tinstdone accu =\n");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
engine->hangcheck.instdone[j]);
seq_puts(m, "\n");
i915_instdone_info(dev_priv, m,
&engine->hangcheck.instdone);
}
}
@ -1635,7 +1674,8 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
if (INTEL_GEN(dev_priv) >= 7)
if (intel_fbc_is_active(dev_priv) &&
INTEL_GEN(dev_priv) >= 7)
seq_printf(m, "Compressing: %s\n",
yesno(I915_READ(FBC_STATUS2) &
FBC_COMPRESSION_MASK));
@ -1909,6 +1949,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@ -1935,7 +1976,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
@ -2002,6 +2043,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
@ -2014,7 +2056,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@ -2022,84 +2064,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return 0;
}
static int i915_execlists(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
u32 status;
u32 ctx_id;
struct list_head *cursor;
int i, ret;
if (!i915.enable_execlists) {
seq_puts(m, "Logical Ring Contexts are disabled\n");
return 0;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *head_req = NULL;
int count = 0;
seq_printf(m, "%s\n", engine->name);
status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id);
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = GEN8_CSB_READ_PTR(status_pointer);
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
read_pointer, write_pointer);
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id);
}
spin_lock_bh(&engine->execlist_lock);
list_for_each(cursor, &engine->execlist_queue)
count++;
head_req = list_first_entry_or_null(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
spin_unlock_bh(&engine->execlist_lock);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
seq_printf(m, "\tHead request context: %u\n",
head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
seq_putc(m, '\n');
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@ -2201,14 +2165,15 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int i;
if (!ppgtt)
return;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@ -2223,11 +2188,12 @@ static void gen6_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
@ -2296,9 +2262,10 @@ out_unlock:
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int count = 0;
for_each_engine(engine, i915)
for_each_engine(engine, i915, id)
count += intel_engine_has_waiter(engine);
return count;
@ -2461,7 +2428,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
for_each_engine_id(engine, dev_priv, id) {
for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
@ -2504,7 +2471,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
for_each_engine_id(engine, dev_priv, id) {
for_each_engine(engine, dev_priv, id) {
u64 submissions = guc.submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
@ -3121,6 +3088,134 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
struct rb_node *rb;
u64 addr;
seq_printf(m, "%s\n", engine->name);
seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
intel_engine_get_seqno(engine),
engine->last_submitted_seqno,
engine->hangcheck.seqno,
engine->hangcheck.score);
rcu_read_lock();
seq_printf(m, "\tRequests:\n");
rq = list_first_entry(&engine->request_list,
struct drm_i915_gem_request, link);
if (&rq->link != &engine->request_list)
print_request(m, rq, "\t\tfirst ");
rq = list_last_entry(&engine->request_list,
struct drm_i915_gem_request, link);
if (&rq->link != &engine->request_list)
print_request(m, rq, "\t\tlast ");
rq = i915_gem_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
seq_printf(m,
"\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
rq->head, rq->postfix, rq->tail,
rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
}
seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
I915_READ(RING_START(engine->mmio_base)),
rq ? i915_ggtt_offset(rq->ring->vma) : 0);
seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
rq ? rq->ring->head : 0);
seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
rq ? rq->ring->tail : 0);
seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
I915_READ(RING_CTL(engine->mmio_base)),
I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
rcu_read_unlock();
addr = intel_engine_get_active_head(engine);
seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
addr = intel_engine_get_last_batch_head(engine);
seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (i915.enable_execlists) {
u32 ptr, read, write;
seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
I915_READ(RING_EXECLIST_STATUS_LO(engine)),
I915_READ(RING_EXECLIST_STATUS_HI(engine)));
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
seq_printf(m, "\tExeclist CSB read %d, write %d\n",
read, write);
if (read >= GEN8_CSB_ENTRIES)
read = 0;
if (write >= GEN8_CSB_ENTRIES)
write = 0;
if (read > write)
write += GEN8_CSB_ENTRIES;
while (read < write) {
unsigned int idx = ++read % GEN8_CSB_ENTRIES;
seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
idx,
I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
}
rcu_read_lock();
rq = READ_ONCE(engine->execlist_port[0].request);
if (rq)
print_request(m, rq, "\t\tELSP[0] ");
else
seq_printf(m, "\t\tELSP[0] idle\n");
rq = READ_ONCE(engine->execlist_port[1].request);
if (rq)
print_request(m, rq, "\t\tELSP[1] ");
else
seq_printf(m, "\t\tELSP[1] idle\n");
rcu_read_unlock();
} else if (INTEL_GEN(dev_priv) > 6) {
seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE_READ(engine)));
seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
I915_READ(RING_PP_DIR_DCLV(engine)));
}
spin_lock(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock(&b->lock);
seq_puts(m, "\n");
}
return 0;
}
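The CSB walk in the execlist branch above unwraps the write pointer past GEN8_CSB_ENTRIES so that a single linear loop can visit the wrapped entries in order. A self-contained sketch of the same arithmetic; the entry count of 6 matches the usual GEN8_CSB_ENTRIES value, but treat it as an assumption here:

/* Standalone illustration of the wraparound walk used above. */
#include <stdio.h>

#define CSB_ENTRIES 6	/* assumed to match GEN8_CSB_ENTRIES */

static void walk_csb(unsigned int read, unsigned int write)
{
	if (read >= CSB_ENTRIES)
		read = 0;
	if (write >= CSB_ENTRIES)
		write = 0;
	if (read > write)
		write += CSB_ENTRIES;	/* unwrap so one linear loop suffices */
	while (read < write) {
		unsigned int idx = ++read % CSB_ENTRIES;
		printf("visit CSB[%u]\n", idx);
	}
}

int main(void)
{
	walk_csb(4, 1);	/* wrapped case: visits entries 5, 0, 1 */
	return 0;
}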
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@ -3147,7 +3242,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
for_each_engine_id(engine, dev_priv, id) {
for_each_engine(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
@ -3172,7 +3267,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
@ -3180,7 +3275,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
}
seq_puts(m, "\nSync seqno:\n");
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < num_rings; j++)
seq_printf(m, " 0x%08x ",
engine->semaphore.sync_seqno[j]);
@ -3236,7 +3331,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
for_each_engine_id(engine, dev_priv, id)
for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
@ -4462,7 +4557,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
num_levels = ilk_wm_max_level(dev) + 1;
num_levels = ilk_wm_max_level(dev_priv) + 1;
drm_modeset_lock_all(dev);
@ -4578,7 +4673,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
num_levels = ilk_wm_max_level(dev) + 1;
num_levels = ilk_wm_max_level(dev_priv) + 1;
if (len >= sizeof(tmp))
return -EINVAL;
@ -5274,7 +5369,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_execlists", i915_execlists, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@ -5286,6 +5380,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@ -5308,7 +5403,9 @@ static const struct i915_debugfs_files {
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},


@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
fmt, ##__VA_ARGS__)
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
enum intel_pch ret = PCH_NOP;
@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
* make an educated guess as to which PCH is really there.
*/
if (IS_GEN5(dev)) {
if (IS_GEN5(dev_priv)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@ -174,40 +174,46 @@ static void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
WARN_ON(!(IS_GEN6(dev_priv) ||
IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
WARN_ON(!(IS_GEN6(dev_priv) ||
IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
WARN_ON(!IS_HASWELL(dev_priv) &&
!IS_BROADWELL(dev_priv));
WARN_ON(IS_HSW_ULT(dev_priv) ||
IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
WARN_ON(!IS_HASWELL(dev_priv) &&
!IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) &&
!IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
WARN_ON(!IS_KABYLAKE(dev));
WARN_ON(!IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@ -215,7 +221,8 @@ static void intel_detect_pch(struct drm_device *dev)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
dev_priv->pch_type =
intel_virt_detect_pch(dev_priv);
} else
continue;
@ -255,16 +262,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
value = intel_engine_initialized(&dev_priv->engine[VCS]);
value = !!dev_priv->engine[VCS];
break;
case I915_PARAM_HAS_BLT:
value = intel_engine_initialized(&dev_priv->engine[BCS]);
value = !!dev_priv->engine[BCS];
break;
case I915_PARAM_HAS_VEBOX:
value = intel_engine_initialized(&dev_priv->engine[VECS]);
value = !!dev_priv->engine[VECS];
break;
case I915_PARAM_HAS_BSD2:
value = intel_engine_initialized(&dev_priv->engine[VCS2]);
value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
@ -417,12 +424,12 @@ intel_setup_mchbar(struct drm_device *dev)
u32 temp;
bool enabled;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return;
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
@ -440,7 +447,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
@ -456,7 +463,7 @@ intel_teardown_mchbar(struct drm_device *dev)
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@ -532,32 +539,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode. And the only
* known way to disable logical contexts is through a GPU reset.
*
* So in order to leave the system in a known default configuration,
* always reset the GPU upon unload. Afterwards we then clean up the
* GEM state tracking, flushing off the requests and leaving the
* system in a known idle state.
*
* Note that it is of the utmost importance that the GPU is idle and
* all stray writes are flushed *before* we dismantle the backing
* storage for the pinned objects.
*
* However, since we are uncertain that resetting the GPU on older
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
if (HAS_HW_CONTEXTS(dev)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
@ -636,6 +617,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
if (i915_gem_suspend(dev))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev);
cleanup_irq:
intel_guc_fini(dev);
@ -771,6 +754,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
/*
* We don't keep the workarounds for pre-production hardware, so we expect our
* driver to fail on these machines in one way or another. A little warning on
* dmesg may help both the user and the bug triagers.
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
if (IS_HSW_EARLY_SDV(dev_priv) ||
IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
DRM_ERROR("This is a pre-production stepping. "
"It may not be fully functional.\n");
}
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
@ -838,13 +834,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_device_info_dump(dev_priv);
/* Not all pre-production machines fall into this category, only the
* very first ones. Almost everything should work, except for maybe
* suspend/resume. And we don't implement workarounds that affect only
* pre-production machines. */
if (IS_HSW_EARLY_SDV(dev_priv))
DRM_INFO("This is an early pre-production Haswell machine. "
"It may not be fully functional.\n");
intel_detect_preproduction_hw(dev_priv);
return 0;
@ -870,7 +860,7 @@ static int i915_mmio_setup(struct drm_device *dev)
int mmio_bar;
int mmio_size;
mmio_bar = IS_GEN2(dev) ? 1 : 0;
mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@ -1023,7 +1013,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev)) {
if (IS_GEN2(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@ -1070,7 +1060,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev)) {
if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
@ -1242,6 +1232,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
intel_runtime_pm_put(dev_priv);
@ -1447,8 +1441,6 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
intel_display_set_init_power(dev_priv, false);
intel_csr_ucode_suspend(dev_priv);
out:
@ -1466,6 +1458,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
intel_display_set_init_power(dev_priv, false);
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
@ -1721,6 +1715,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
static void disable_engines_irq(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Ensure irq handler finishes, and not run again. */
disable_irq(dev_priv->drm.irq);
for_each_engine(engine, dev_priv, id)
tasklet_kill(&engine->irq_tasklet);
}
static void enable_engines_irq(struct drm_i915_private *dev_priv)
{
enable_irq(dev_priv->drm.irq);
}
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@ -1754,7 +1764,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
enable_engines_irq(dev_priv);
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@ -2282,7 +2296,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n");
@ -2386,7 +2400,7 @@ static int intel_runtime_resume(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
@ -2404,7 +2418,7 @@ static int intel_runtime_resume(struct device *kdev)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
if (IS_BROXTON(dev)) {
if (IS_BROXTON(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&


@ -70,7 +70,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160919"
#define DRIVER_DATE "20161024"
#define DRIVER_TIMESTAMP 1477290335
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
@ -185,6 +186,7 @@ enum plane {
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
enum port {
PORT_NONE = -1,
PORT_A = 0,
PORT_B,
PORT_C,
@ -581,13 +583,25 @@ struct intel_uncore_funcs {
uint32_t val, bool trace);
};
struct intel_forcewake_range {
u32 start;
u32 end;
enum forcewake_domains domains;
};
struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
struct intel_uncore_funcs funcs;
unsigned fifo_count;
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
@ -633,54 +647,53 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
func(is_i915g) sep \
func(is_i945gm) sep \
func(is_g33) sep \
func(hws_needs_physical) sep \
func(is_g4x) sep \
func(is_pineview) sep \
func(is_broadwater) sep \
func(is_crestline) sep \
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_cherryview) sep \
func(is_haswell) sep \
func(is_broadwell) sep \
func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_psr) sep \
func(has_runtime_pm) sep \
func(has_csr) sep \
func(has_resource_streamer) sep \
func(has_rc6) sep \
func(has_rc6p) sep \
func(has_dp_mst) sep \
func(has_gmbus_irq) sep \
func(has_hw_contexts) sep \
func(has_logical_ring_contexts) sep \
func(has_l3_dpf) sep \
func(has_gmch_display) sep \
func(has_guc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
func(cursor_needs_physical) sep \
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_llc) sep \
func(has_snoop) sep \
func(has_ddi) sep \
func(has_fpga_dbg) sep \
func(has_pooled_eu)
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
#define DEV_INFO_FOR_EACH_FLAG(func) \
/* Keep is_* in chronological order */ \
func(is_mobile); \
func(is_i85x); \
func(is_i915g); \
func(is_i945gm); \
func(is_g33); \
func(is_g4x); \
func(is_pineview); \
func(is_broadwater); \
func(is_crestline); \
func(is_ivybridge); \
func(is_valleyview); \
func(is_cherryview); \
func(is_haswell); \
func(is_broadwell); \
func(is_skylake); \
func(is_broxton); \
func(is_kabylake); \
func(is_preliminary); \
/* Keep has_* in alphabetical order */ \
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
func(has_fbc); \
func(has_fpga_dbg); \
func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
func(has_hotplug); \
func(has_hw_contexts); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_overlay); \
func(has_pipe_cxsr); \
func(has_pooled_eu); \
func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
func(has_resource_streamer); \
func(has_runtime_pm); \
func(has_snoop); \
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
func(supports_tv)
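With the separator argument gone, each DEV_INFO_FOR_EACH_FLAG() user supplies a macro whose expansion is already a complete declaration or statement. A hand-expanded (and abridged) illustration of the two call sites touched by this patch, written out here only for clarity:

/* In struct intel_device_info, DEFINE_FLAG yields one bitfield per flag: */
u8 is_mobile:1;
u8 is_i85x:1;
/* ... */
u8 supports_tv:1;

/* In i915_capabilities(), PRINT_FLAG yields one seq_printf() per flag: */
seq_printf(m, "is_mobile" ": %s\n", yesno(info->is_mobile));
seq_printf(m, "is_i85x" ": %s\n", yesno(info->is_i85x));
/* ... */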
struct sseu_dev_info {
u8 slice_mask;
@ -709,7 +722,9 @@ struct intel_device_info {
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@ -726,15 +741,14 @@ struct intel_device_info {
} color;
};
#undef DEFINE_FLAG
#undef SEP_SEMICOLON
struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
struct drm_i915_private *i915;
char error_msg[128];
bool simulated;
int iommu;
@ -759,7 +773,7 @@ struct drm_i915_error_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
@ -775,6 +789,9 @@ struct drm_i915_error_state {
struct i915_address_space *vm;
int num_requests;
/* position of active request inside the ring */
u32 rq_head, rq_post, rq_tail;
/* our own tracking of ring head and tail */
u32 cpu_ring_head;
u32 cpu_ring_tail;
@ -791,7 +808,6 @@ struct drm_i915_error_state {
u32 hws;
u32 ipeir;
u32 ipehr;
u32 instdone;
u32 bbstate;
u32 instpm;
u32 instps;
@ -802,11 +818,13 @@ struct drm_i915_error_state {
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
struct drm_i915_error_object {
int page_count;
u64 gtt_offset;
u64 gtt_size;
int page_count;
int unused;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
@ -815,10 +833,11 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
pid_t pid;
u32 context;
u32 seqno;
u32 head;
u32 tail;
} *requests;
} *requests, execlist[2];
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@ -972,6 +991,9 @@ struct intel_fbc {
bool enabled;
bool active;
bool underrun_detected;
struct work_struct underrun_work;
struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
@ -1368,7 +1390,7 @@ struct i915_gem_mm {
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
size_t object_memory;
u64 object_memory;
u32 object_count;
};
@ -1620,7 +1642,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
@ -1628,15 +1649,12 @@ struct skl_ddb_allocation {
struct skl_wm_values {
unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};
struct skl_wm_level {
bool plane_en[I915_MAX_PLANES];
uint16_t plane_res_b[I915_MAX_PLANES];
uint8_t plane_res_l[I915_MAX_PLANES];
bool plane_en;
uint16_t plane_res_b;
uint8_t plane_res_l;
};
/*
@ -1759,7 +1777,7 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
struct intel_gvt gvt;
struct intel_gvt *gvt;
struct intel_guc guc;
@ -1787,7 +1805,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
u32 next_seqno;
@ -2079,7 +2097,8 @@ struct drm_i915_private {
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
struct intel_encoder *dig_port_map[I915_MAX_PORTS];
/* Used to save the pipe-to-encoder mapping for audio */
struct intel_encoder *av_enc_map[I915_MAX_PIPES];
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@ -2103,19 +2122,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
}
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__) \
for ((engine__) = &(dev_priv__)->engine[0]; \
(engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
(engine__)++) \
for_each_if (intel_engine_initialized(engine__))
/* Iterator with engine_id */
#define for_each_engine_id(engine__, dev_priv__, id__) \
for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
(engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
(engine__)++) \
for_each_if (((id__) = (engine__)->id, \
intel_engine_initialized(engine__)))
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
(id__) < I915_NUM_ENGINES; \
(id__)++) \
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
@ -2126,7 +2137,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
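Callers of the reworked iterators now always provide an enum intel_engine_id (or a scratch mask) cursor; a minimal hedged sketch of both forms, with placeholder loop bodies:

/* Illustrative callers only. */
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int tmp;

for_each_engine(engine, dev_priv, id)
	DRM_DEBUG_DRIVER("engine %d is %s\n", id, engine->name);

for_each_engine_masked(engine, dev_priv, ALL_ENGINES, tmp)
	DRM_DEBUG_DRIVER("masked walk hit %s\n", engine->name);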
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@ -2586,8 +2597,9 @@ struct drm_i915_cmd_table {
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
@ -2598,7 +2610,7 @@ struct drm_i915_cmd_table {
*
* Use GEN_FOREVER for an unbound start and/or end.
*/
#define IS_GEN(p, s, e) ({ \
#define IS_GEN(dev_priv, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
@ -2608,7 +2620,7 @@ struct drm_i915_cmd_table {
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
!!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
!!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
})
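A couple of hedged usage examples of the range check above; GEN_FOREVER marks an open end, and the helpers called here are hypothetical placeholders:

/* Illustrative checks only. */
if (IS_GEN(dev_priv, 6, 7))
	do_gen6_or_gen7_setup(dev_priv);	/* hypothetical helper */

if (IS_GEN(dev_priv, GEN_FOREVER, 5))
	do_legacy_setup(dev_priv);		/* hypothetical helper */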
/*
@ -2619,73 +2631,73 @@ struct drm_i915_cmd_table {
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
INTEL_DEVID(dev_priv) == 0x0152 || \
INTEL_DEVID(dev_priv) == 0x015a)
#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xb || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
(INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0xf) == 0xe)
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
INTEL_DEVID(dev) == 0x0A1E)
#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
INTEL_DEVID(dev) == 0x1913 || \
INTEL_DEVID(dev) == 0x1916 || \
INTEL_DEVID(dev) == 0x1921 || \
INTEL_DEVID(dev) == 0x1926)
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
INTEL_DEVID(dev) == 0x1915 || \
INTEL_DEVID(dev) == 0x191E)
#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
INTEL_DEVID(dev) == 0x5913 || \
INTEL_DEVID(dev) == 0x5916 || \
INTEL_DEVID(dev) == 0x5921 || \
INTEL_DEVID(dev) == 0x5926)
#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
INTEL_DEVID(dev) == 0x5915 || \
INTEL_DEVID(dev) == 0x591E)
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0030)
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
INTEL_DEVID(dev_priv) == 0x1913 || \
INTEL_DEVID(dev_priv) == 0x1916 || \
INTEL_DEVID(dev_priv) == 0x1921 || \
INTEL_DEVID(dev_priv) == 0x1926)
#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
INTEL_DEVID(dev_priv) == 0x1915 || \
INTEL_DEVID(dev_priv) == 0x191E)
#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
INTEL_DEVID(dev_priv) == 0x5913 || \
INTEL_DEVID(dev_priv) == 0x5916 || \
INTEL_DEVID(dev_priv) == 0x5921 || \
INTEL_DEVID(dev_priv) == 0x5926)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@ -2705,7 +2717,8 @@ struct drm_i915_cmd_table {
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
#define IS_BXT_REVID(dev_priv, since, until) \
(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
@ -2713,8 +2726,8 @@ struct drm_i915_cmd_table {
#define KBL_REVID_D0 0x3
#define KBL_REVID_E0 0x4
#define IS_KBL_REVID(p, since, until) \
(IS_KABYLAKE(p) && IS_REVID(p, since, until))
#define IS_KBL_REVID(dev_priv, since, until) \
(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
/*
* The genX designation typically refers to the render engine, so render
@ -2722,14 +2735,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@ -2750,8 +2763,8 @@ struct drm_i915_cmd_table {
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
HAS_EDRAM(dev))
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
@ -2764,7 +2777,7 @@ struct drm_i915_cmd_table {
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@ -2784,8 +2797,9 @@ struct drm_i915_cmd_table {
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@ -2793,19 +2807,19 @@ struct drm_i915_cmd_table {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@ -2832,22 +2846,27 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2 : HAS_L3_DPF(dev_priv))
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
@ -2883,6 +2902,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern const struct dev_pm_ops i915_pm_ops;
extern int i915_driver_load(struct pci_dev *pdev,
const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
@ -2969,7 +2993,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt.initialized;
return dev_priv->gvt;
}
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@ -3082,7 +3106,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
u64 size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@ -3156,14 +3180,15 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages == NULL);
GEM_BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
GEM_BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
}
enum i915_map_type {
@ -3521,6 +3546,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
/* i915_gpu_error.c */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@ -3541,7 +3568,20 @@ void i915_error_state_get(struct drm_device *dev,
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
#else
static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg)
{
}
static inline void i915_destroy_error_state(struct drm_device *dev)
{
}
#endif
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@ -3591,6 +3631,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@ -3807,11 +3850,11 @@ __raw_write(64, q)
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return VLV_VGACNTRL;
else if (INTEL_INFO(dev)->gen >= 5)
else if (INTEL_GEN(dev_priv) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;


@ -82,7 +82,7 @@ remove_mappable_node(struct drm_mm_node *node)
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
@ -91,7 +91,7 @@ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
size_t size)
u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
@ -919,8 +919,7 @@ out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
node.start, node.size,
true);
node.start, node.size);
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
@ -1228,8 +1227,7 @@ out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
node.start, node.size,
true);
node.start, node.size);
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
@ -1813,8 +1811,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int, chunk_size,
(area->vm_end - area->vm_start) / PAGE_SIZE -
view.params.partial.offset);
vma_pages(area) - view.params.partial.offset);
/* If the partial covers the entire object, just create a
* normal VMA.
@ -2208,6 +2205,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
static unsigned int swiotlb_max_size(void)
{
#if IS_ENABLED(CONFIG_SWIOTLB)
return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
return 0;
#endif
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@ -2219,6 +2225,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment;
int ret;
gfp_t gfp;
@ -2229,6 +2236,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
max_segment = swiotlb_max_size();
if (!max_segment)
max_segment = rounddown(UINT_MAX, PAGE_SIZE);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
@ -2264,22 +2275,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_pages;
}
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
sg = sg_next(sg);
continue;
}
#endif
if (!i || page_to_pfn(page) != last_pfn + 1) {
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
st->nents++;
@ -2292,9 +2296,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
#ifdef CONFIG_SWIOTLB
if (!swiotlb_nr_tbl())
#endif
if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
obj->pages = st;
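
The hunk above replaces the old CONFIG_SWIOTLB special case with a generic max_segment limit: a page is merged into the current scatterlist entry only while it is physically contiguous with the previous one and the entry stays under that limit. Below is a rough userspace sketch of just that merging rule; max_segment, the pfn values and the 4096 page size are made-up stand-ins, not the driver's data.

/* Illustrative only: mimics the segment-splitting condition from
 * i915_gem_object_get_pages_gtt() above with plain integers in place
 * of struct page / scatterlist entries.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pfns[] = { 100, 101, 102, 200, 201, 202, 203 };
	unsigned long max_segment = 3 * 4096;	/* pretend swiotlb limit */
	unsigned long seg_len = 0, last_pfn = 0;
	unsigned int i, segments = 0;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
		if (!i || seg_len >= max_segment ||
		    pfns[i] != last_pfn + 1) {
			segments++;		/* start a new sg entry */
			seg_len = 0;
		}
		seg_len += 4096;		/* one page */
		last_pfn = pfns[i];
	}
	printf("%u segments\n", segments);	/* expect 3 */
	return 0;
}
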
@ -2581,8 +2583,6 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
struct i915_gem_context *incomplete_ctx;
bool ring_hung;
/* Ensure irq handler finishes, and not run again. */
tasklet_kill(&engine->irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@ -2591,6 +2591,9 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
ring_hung = false;
i915_set_reset_status(request->ctx, ring_hung);
if (!ring_hung)
return;
@ -2621,10 +2624,11 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
i915_gem_retire_requests(dev_priv);
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
i915_gem_restore_fences(&dev_priv->drm);
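
Several hunks in this series move for_each_engine() to a three-argument form and turn dev_priv->engine[] into an array of pointers that may contain NULLs (compare the engine[RCS]->mmio_base and "if (dev_priv->engine[RCS])" changes further down). The toy program below illustrates that iteration style; the struct and macro definitions are invented for the example and are not the driver's.

#include <stdio.h>

#define NUM_ENGINES 5

struct engine { const char *name; };
struct priv { struct engine *engine[NUM_ENGINES]; };

/* Yield only the populated slots, together with their index. */
#define for_each_engine(e, p, id) \
	for ((id) = 0; (id) < NUM_ENGINES; (id)++) \
		if (!((e) = (p)->engine[(id)])) {} else

int main(void)
{
	struct engine rcs = { "rcs" }, vcs = { "vcs" };
	struct priv p = { .engine = { [0] = &rcs, [2] = &vcs } };
	struct engine *e;
	int id;

	for_each_engine(e, &p, id)
		printf("%d: %s\n", id, e->name);	/* 0: rcs, then 2: vcs */
	return 0;
}
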
@ -2672,12 +2676,13 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
i915_gem_context_lost(dev_priv);
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
i915_gem_cleanup_engine(engine);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
@ -2716,6 +2721,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
@ -2738,7 +2744,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (dev_priv->gt.active_engines)
goto out_unlock;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
GEM_BUG_ON(!dev_priv->gt.awake);
@ -2931,9 +2937,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
if (engine->last_context == NULL)
continue;
@ -3095,6 +3102,9 @@ search_free:
goto err_unpin;
}
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
@ -3173,7 +3183,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
@ -3468,7 +3478,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
level = HAS_WT(dev_priv) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
@ -3526,7 +3536,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
HAS_WT(to_i915(obj->base.dev)) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_display;
@ -3793,7 +3804,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
int ret;
@ -3806,6 +3818,41 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE) {
u32 fence_size;
fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
i915_gem_object_get_tiling(obj));
/* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
* vain. Worse, doing so may cause us to ping-pong
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
if (fence_size > dev_priv->ggtt.mappable_end)
return ERR_PTR(-E2BIG);
/* If NONBLOCK is set the caller is optimistically
* trying to cache the full object within the mappable
* aperture, and *must* have a fallback in place for
* situations where we cannot bind the object. We
* can be a little more lax here and use the fallback
* more often to avoid costly migrations of ourselves
* and other objects within the aperture.
*
* Half-the-aperture is used as a simple heuristic.
* More interesting would be to search for a free
* block prior to making the commitment to unbind.
* That caters for the self-harm case, and with a
* little more heuristics (e.g. NOFAULT, NOEVICT)
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
" offset=%08x, req.alignment=%llx,"
@ -4084,14 +4131,29 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size)
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_device *dev, u64 size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
int ret;
/* There is a prevalence of the assumption that we fit the object's
* page count inside a 32bit _signed_ variable. Let's document this and
* catch if we ever need to fix it. In the meantime, if you do spot
* such a local variable, please consider fixing!
*/
if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
return ERR_PTR(-E2BIG);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
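
The overflows_type() check added above guards against a 64-bit size being silently truncated when stored in a narrower field. A standalone illustration of the same macro follows; the 32-bit destination is an assumption made for the example, not the actual width of obj->base.size.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

int main(void)
{
	uint64_t ok = 1ull << 20;	/* 1 MiB fits */
	uint64_t too_big = 1ull << 40;	/* 1 TiB does not */
	uint32_t dst = 0;		/* pretend destination field */

	printf("%d %d\n", !!overflows_type(ok, dst),
	       !!overflows_type(too_big, dst));	/* prints: 0 1 */
	return 0;
}
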
@ -4268,6 +4330,30 @@ int i915_gem_suspend(struct drm_device *dev)
*/
WARN_ON(dev_priv->gt.awake);
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode. And the only
* known way to disable logical contexts is through a GPU reset.
*
* So in order to leave the system in a known default configuration,
* always reset the GPU upon unload and suspend. Afterwards we then
* clean up the GEM state tracking, flushing off the requests and
* leaving the system in a known idle state.
*
* Note that it is of the utmost importance that the GPU is idle and
* all stray writes are flushed *before* we dismantle the backing
* storage for the pinned objects.
*
* However, since we are uncertain that resetting the GPU on older
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
if (HAS_HW_CONTEXTS(dev)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
return 0;
err:
@ -4302,44 +4388,42 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
if (IS_GEN5(dev))
if (IS_GEN5(dev_priv))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
if (IS_GEN6(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (IS_GEN7(dev))
else if (IS_GEN7(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (IS_GEN8(dev))
else if (IS_GEN8(dev_priv))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
static void init_unused_ring(struct drm_device *dev, u32 base)
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
static void init_unused_rings(struct drm_device *dev)
static void init_unused_rings(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev)) {
init_unused_ring(dev, PRB1_BASE);
init_unused_ring(dev, SRB0_BASE);
init_unused_ring(dev, SRB1_BASE);
init_unused_ring(dev, SRB2_BASE);
init_unused_ring(dev, SRB3_BASE);
} else if (IS_GEN2(dev)) {
init_unused_ring(dev, SRB0_BASE);
init_unused_ring(dev, SRB1_BASE);
} else if (IS_GEN3(dev)) {
init_unused_ring(dev, PRB1_BASE);
init_unused_ring(dev, PRB2_BASE);
if (IS_I830(dev_priv)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
init_unused_ring(dev_priv, SRB2_BASE);
init_unused_ring(dev_priv, SRB3_BASE);
} else if (IS_GEN2(dev_priv)) {
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
} else if (IS_GEN3(dev_priv)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, PRB2_BASE);
}
}
@ -4348,6 +4432,7 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Double layer security blanket, see i915_gem_init() */
@ -4356,12 +4441,12 @@ i915_gem_init_hw(struct drm_device *dev)
if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev))
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
if (IS_HASWELL(dev_priv))
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) {
if (IS_IVYBRIDGE(dev)) {
if (HAS_PCH_NOP(dev_priv)) {
if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
@ -4380,7 +4465,7 @@ i915_gem_init_hw(struct drm_device *dev)
* will prevent c3 entry. Makes sure all unused rings
* are totally idle.
*/
init_unused_rings(dev);
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
@ -4391,7 +4476,7 @@ i915_gem_init_hw(struct drm_device *dev)
}
/* Need to do basic initialisation of all rings first: */
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
@ -4490,17 +4575,12 @@ i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
static void
init_engine_lists(struct intel_engine_cs *engine)
{
INIT_LIST_HEAD(&engine->request_list);
}
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
@ -4537,7 +4617,6 @@ void
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
dev_priv->objects =
kmem_cache_create("i915_gem_object",
@ -4561,8 +4640,6 @@ i915_gem_load_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
for (i = 0; i < I915_NUM_ENGINES; i++)
init_engine_lists(&dev_priv->engine[i]);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,


@ -192,7 +192,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
if (IS_IVYBRIDGE(dev)) {
if (IS_IVYBRIDGE(to_i915(dev))) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@ -474,10 +474,11 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
@ -492,13 +493,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915_gem_context_is_default(ctx))
continue;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
@ -563,6 +564,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@ -605,7 +607,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, dev_priv) {
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@ -634,7 +636,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, dev_priv) {
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@ -929,8 +931,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;


@ -37,8 +37,9 @@ static bool
gpu_is_idle(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
if (intel_engine_is_active(engine))
return false;
}


@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
ggtt->base.clear_range(&ggtt->base,
cache->node.start,
cache->node.size,
true);
cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@ -429,7 +428,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
if (cache->vaddr) {
io_mapping_unmap_atomic(unmask_page(cache->vaddr));
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int ret;
@ -474,7 +473,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
@ -552,27 +551,13 @@ repeat:
return 0;
}
static bool object_is_idle(struct drm_i915_gem_object *obj)
{
unsigned long active = i915_gem_object_get_active(obj);
int idx;
for_each_active(active, idx) {
if (!i915_gem_active_is_idle(&obj->last_read[idx],
&obj->base.dev->struct_mutex))
return false;
}
return true;
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct reloc_cache *cache)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
@ -591,7 +576,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
if (unlikely(IS_GEN6(dev_priv) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
@ -649,10 +634,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EINVAL;
}
/* We can't wait for rendering with pagefaults disabled */
if (pagefault_disabled() && !object_is_idle(obj))
return -EFAULT;
ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
@ -679,12 +660,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
int count = remain;
if (count > ARRAY_SIZE(stack_reloc))
count = ARRAY_SIZE(stack_reloc);
unsigned long unwritten;
unsigned int count;
count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
remain -= count;
if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
/* This is the fast path and we cannot handle a pagefault
* whilst holding the struct mutex lest the user pass in the
* relocations contained within a mmaped bo. For in such a case,
* the page fault handler would call i915_gem_fault() and
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
pagefault_disable();
unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
pagefault_enable();
if (unlikely(unwritten)) {
ret = -EFAULT;
goto out;
}
@ -696,11 +688,26 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
if (ret)
goto out;
if (r->presumed_offset != offset &&
__put_user(r->presumed_offset,
&user_relocs->presumed_offset)) {
ret = -EFAULT;
goto out;
if (r->presumed_offset != offset) {
pagefault_disable();
unwritten = __put_user(r->presumed_offset,
&user_relocs->presumed_offset);
pagefault_enable();
if (unlikely(unwritten)) {
/* Note that reporting an error now
* leaves everything in an inconsistent
* state as we have *already* changed
* the relocation value inside the
* object. As we have not changed the
* reloc.presumed_offset or will not
* change the execobject.offset, on the
* call we may not rewrite the value
* inside the object, leaving it
* dangling and causing a GPU hang.
*/
ret = -EFAULT;
goto out;
}
}
user_relocs++;
@ -740,20 +747,11 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
struct i915_vma *vma;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the struct mutex lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
pagefault_enable();
return ret;
}
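
The relocation rework above narrows pagefault_disable()/pagefault_enable() to just the user copies and keeps staging the user's relocation array through a fixed-size on-stack buffer. The sketch below shows only that chunking pattern in userspace; the buffer size and element type are arbitrary, and the pagefault handling itself is kernel-only and not reproduced.

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reloc { unsigned int offset; };

int main(void)
{
	struct reloc user[10] = { {0} };	/* stands in for the user's array */
	struct reloc stack[4];			/* on-stack staging buffer */
	const struct reloc *src = user;
	unsigned int remain = ARRAY_SIZE(user);

	while (remain) {
		unsigned int count = remain;

		if (count > ARRAY_SIZE(stack))
			count = ARRAY_SIZE(stack);
		remain -= count;

		/* in the driver this copy runs with pagefaults disabled */
		memcpy(stack, src, count * sizeof(stack[0]));
		printf("processed a chunk of %u relocations\n", count);
		src += count;
	}
	return 0;
}
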
@ -1599,12 +1597,12 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
engine = &dev_priv->engine[_VCS(bsd_idx)];
engine = dev_priv->engine[_VCS(bsd_idx)];
} else {
engine = &dev_priv->engine[user_ring_map[user_ring_id]];
engine = dev_priv->engine[user_ring_map[user_ring_id]];
}
if (!intel_engine_initialized(engine)) {
if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return NULL;
}


@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
if (!fence)
return 0;
@ -341,6 +343,8 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
@ -371,6 +375,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
/* Note that this may be called outside of struct_mutex, by
* runtime suspend/resume. The barrier we require is enforced by
* rpm itself - all access to fences/GTT are only within an rpm
* wakeref, and to acquire that wakeref you must pass through here.
*/
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
@ -379,10 +389,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (vma && !i915_gem_object_is_tiled(vma->obj))
vma = NULL;
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
GEM_BUG_ON(vma->obj->fault_mappable);
fence_update(reg, vma);
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
vma = NULL;
}
fence_write(reg, vma);
reg->vma = vma;
}
}
@ -448,7 +465,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@ -487,19 +504,20 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev)) {
} else if (IS_GEN5(dev_priv)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev)) {
} else if (IS_GEN2(dev_priv)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
} else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
!IS_G33(dev_priv))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@ -537,7 +555,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev) &&
if (IS_GEN4(dev_priv) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;


@ -191,15 +191,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
vma->size,
true);
vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
enum i915_cache_level level)
{
gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
pte |= addr;
switch (level) {
@ -234,9 +232,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
u32 unused)
{
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@ -256,9 +254,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
u32 unused)
{
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@ -280,9 +278,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags)
u32 flags)
{
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@ -296,9 +294,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
u32 unused)
{
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@ -309,9 +307,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
u32 unused)
{
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@ -373,27 +371,29 @@ static void *kmap_page_dma(struct i915_page_dma *p)
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
#define kunmap_px(ppgtt, vaddr) \
kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
fill_page_dma_32((dev_priv), px_base(px), (v))
static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
const uint64_t val)
static void fill_page_dma(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
@ -401,17 +401,17 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
for (i = 0; i < 512; i++)
vaddr[i] = val;
kunmap_page_dma(dev, vaddr);
kunmap_page_dma(dev_priv, vaddr);
}
static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
const uint32_t val32)
static void fill_page_dma_32(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, const uint32_t val32)
{
uint64_t v = val32;
v = v << 32 | val32;
fill_page_dma(dev, p, v);
fill_page_dma(dev_priv, p, v);
}
static int
@ -472,9 +472,9 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true);
I915_CACHE_LLC);
fill_px(vm->dev, pt, scratch_pte);
fill_px(to_i915(vm->dev), pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@ -485,9 +485,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
WARN_ON(vm->scratch_page.daddr == 0);
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
I915_CACHE_LLC, 0);
fill32_px(vm->dev, pt, scratch_pte);
fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct drm_device *dev)
@ -534,7 +534,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
fill_px(vm->dev, pd, scratch_pde);
fill_px(to_i915(vm->dev), pd, scratch_pde);
}
static int __pdp_init(struct drm_device *dev,
@ -615,7 +615,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
fill_px(vm->dev, pdp, scratch_pdpe);
fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@ -626,7 +626,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
fill_px(vm->dev, pml4, scratch_pml4e);
fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
}
static void
@ -706,83 +706,155 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
uint64_t length,
gen8_pte_t scratch_pte)
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
struct i915_page_table *pt,
uint64_t start,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned int pte_start = gen8_pte_index(start);
unsigned int num_entries = gen8_pte_count(start, length);
uint64_t pte;
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
unsigned pte = gen8_pte_index(start);
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
if (WARN_ON(!pdp))
return;
if (WARN_ON(!px_page(pt)))
return false;
while (num_entries) {
struct i915_page_directory *pd;
struct i915_page_table *pt;
bitmap_clear(pt->used_ptes, pte_start, num_entries);
if (WARN_ON(!pdp->page_directory[pdpe]))
break;
if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
free_pt(vm->dev, pt);
return true;
}
pd = pdp->page_directory[pdpe];
pt_vaddr = kmap_px(pt);
for (pte = pte_start; pte < num_entries; pte++)
pt_vaddr[pte] = scratch_pte;
kunmap_px(ppgtt, pt_vaddr);
return false;
}
/* Removes entries from a single page dir, releasing it if it's empty.
* Caller can use the return value to update higher-level entries
*/
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
struct i915_page_directory *pd,
uint64_t start,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint64_t pde;
gen8_pde_t *pde_vaddr;
gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
I915_CACHE_LLC);
gen8_for_each_pde(pt, pd, start, length, pde) {
if (WARN_ON(!pd->page_table[pde]))
break;
pt = pd->page_table[pde];
if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
__clear_bit(pde, pd->used_pdes);
pde_vaddr = kmap_px(pd);
pde_vaddr[pde] = scratch_pde;
kunmap_px(ppgtt, pde_vaddr);
}
}
if (WARN_ON(!px_page(pt)))
if (bitmap_empty(pd->used_pdes, I915_PDES)) {
free_pd(vm->dev, pd);
return true;
}
return false;
}
/* Removes entries from a single page dir pointer, releasing it if it's empty.
* Caller can use the return value to update higher-level entries
*/
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd;
uint64_t pdpe;
gen8_ppgtt_pdpe_t *pdpe_vaddr;
gen8_ppgtt_pdpe_t scratch_pdpe =
gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (WARN_ON(!pdp->page_directory[pdpe]))
break;
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES)
last_pte = GEN8_PTES;
pt_vaddr = kmap_px(pt);
for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
num_entries--;
if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
__clear_bit(pdpe, pdp->used_pdpes);
if (USES_FULL_48BIT_PPGTT(vm->dev)) {
pdpe_vaddr = kmap_px(pdp);
pdpe_vaddr[pdpe] = scratch_pdpe;
kunmap_px(ppgtt, pdpe_vaddr);
}
}
}
kunmap_px(ppgtt, pt_vaddr);
if (USES_FULL_48BIT_PPGTT(vm->dev) &&
bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
free_pdp(vm->dev, pdp);
return true;
}
pte = 0;
if (++pde == I915_PDES) {
if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
break;
pde = 0;
return false;
}
/* Removes entries from a single pml4.
* This is the top-level structure in 4-level page tables used on gen8+.
* Empty entries are always scratch pml4e.
*/
static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
struct i915_pml4 *pml4,
uint64_t start,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
gen8_ppgtt_pml4e_t *pml4e_vaddr;
gen8_ppgtt_pml4e_t scratch_pml4e =
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->dev));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (WARN_ON(!pml4->pdps[pml4e]))
break;
if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
__clear_bit(pml4e, pml4->used_pml4es);
pml4e_vaddr = kmap_px(pml4);
pml4e_vaddr[pml4e] = scratch_pml4e;
kunmap_px(ppgtt, pml4e_vaddr);
}
}
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
uint64_t start, uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, use_scratch);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
scratch_pte);
} else {
uint64_t pml4e;
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
scratch_pte);
}
}
if (USES_FULL_48BIT_PPGTT(vm->dev))
gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
else
gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
}
static void
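
The new gen8_ppgtt_clear_pt()/_pd()/_pdp()/_pml4() helpers above all follow one pattern: clear a range at the current level, free the structure once its usage bitmap is empty, and return that emptiness so the parent can replace its entry with scratch and possibly free itself. Below is a heavily reduced two-level sketch of that propagation using plain arrays and counters; every name in it is invented, and for simplicity each leaf table sees the same sub-range.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 8

struct table { int used; bool present[ENTRIES]; };
struct dir   { int used; struct table *tables[ENTRIES]; };

/* Clear a leaf; return true if it became empty and was freed. */
static bool clear_table(struct table *pt, int first, int count)
{
	for (int i = first; i < first + count && i < ENTRIES; i++)
		if (pt->present[i]) {
			pt->present[i] = false;
			pt->used--;
		}
	if (pt->used == 0) {
		free(pt);
		return true;
	}
	return false;
}

/* Clear the range in every live leaf, dropping leaves that empty out. */
static bool clear_dir(struct dir *pd, int first, int count)
{
	for (int i = 0; i < ENTRIES; i++) {
		if (!pd->tables[i])
			continue;
		if (clear_table(pd->tables[i], first, count)) {
			pd->tables[i] = NULL;	/* scratch entry in the real code */
			pd->used--;
		}
	}
	return pd->used == 0;
}

int main(void)
{
	struct dir pd = { 0 };

	pd.tables[0] = calloc(1, sizeof(*pd.tables[0]));
	pd.tables[0]->present[3] = true;
	pd.tables[0]->used = 1;
	pd.used = 1;

	printf("directory empty after clear: %d\n",
	       clear_dir(&pd, 0, ENTRIES));	/* prints 1 */
	return 0;
}
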
@ -809,7 +881,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
cache_level, true);
cache_level);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
@ -1452,7 +1524,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true);
I915_CACHE_LLC);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
@ -1569,7 +1641,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
I915_CACHE_LLC, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
@ -1728,8 +1800,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
@ -1741,12 +1814,13 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev_priv)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
@ -1754,7 +1828,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@ -1782,8 +1856,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybdrige/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
@ -1794,7 +1867,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
I915_CACHE_LLC, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@ -1832,7 +1905,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
vm->pte_encode(addr, cache_level, true, flags);
vm->pte_encode(addr, cache_level, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@ -2056,11 +2129,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
ppgtt->switch_mm = gen6_mm_switch;
else if (IS_HASWELL(dev))
else if (IS_HASWELL(dev_priv))
ppgtt->switch_mm = hsw_mm_switch;
else if (IS_GEN7(dev))
else if (IS_GEN7(dev_priv))
ppgtt->switch_mm = gen7_mm_switch;
else
BUG();
@ -2129,13 +2202,13 @@ static void gtt_write_workarounds(struct drm_device *dev)
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
if (IS_BROADWELL(dev))
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev))
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_SKYLAKE(dev))
else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
@ -2157,6 +2230,8 @@ static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
int i915_ppgtt_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
gtt_write_workarounds(dev);
/* In the case of execlists, PPGTT is enabled by the context descriptor
@ -2168,9 +2243,9 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (!USES_PPGTT(dev))
return 0;
if (IS_GEN6(dev))
if (IS_GEN6(dev_priv))
gen6_ppgtt_enable(dev);
else if (IS_GEN7(dev))
else if (IS_GEN7(dev_priv))
gen7_ppgtt_enable(dev);
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
@ -2239,11 +2314,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
@ -2260,7 +2336,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
fault_reg & ~RING_FAULT_VALID);
}
}
POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
/* Engine specific init may not have been done till this point. */
if (dev_priv->engine[RCS])
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@ -2286,8 +2365,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev_priv);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
i915_ggtt_flush(dev_priv);
}
@ -2321,7 +2399,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
@ -2348,7 +2426,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
gtt_entry = gen8_pte_encode(addr, level, true);
gtt_entry = gen8_pte_encode(addr, level);
gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
@ -2412,7 +2490,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
iowrite32(vm->pte_encode(addr, level, true, flags), pte);
iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
@ -2445,7 +2523,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
gtt_entry = vm->pte_encode(addr, level, true, flags);
gtt_entry = vm->pte_encode(addr, level, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
}
@ -2469,16 +2547,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
}
static void nop_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
uint64_t start, uint64_t length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
uint64_t start, uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@ -2498,8 +2572,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
use_scratch);
I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
@ -2509,8 +2582,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@ -2530,7 +2602,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, use_scratch, 0);
I915_CACHE_LLC, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@ -2577,8 +2649,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool unused)
uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned first_entry = start >> PAGE_SHIFT;
@ -2662,13 +2733,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
if (vma->flags & I915_VMA_GLOBAL_BIND)
vma->vm->clear_range(vma->vm,
vma->node.start, size,
true);
vma->node.start, size);
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
vma->node.start, size,
true);
vma->node.start, size);
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@ -2717,6 +2786,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
struct i915_hw_ppgtt *ppgtt;
struct drm_mm_node *entry;
int ret;
@ -2724,45 +2794,48 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
/* Reserve a mappable slot for our lockless error capture */
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
&ggtt->error_capture,
4096, 0, -1,
0, ggtt->mappable_end,
0, 0);
if (ret)
return ret;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->base.clear_range(&ggtt->base, hole_start,
hole_end - hole_start, true);
hole_end - hole_start);
}
/* And finally clear the reserved guard page */
ggtt->base.clear_range(&ggtt->base,
ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
true);
ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return -ENOMEM;
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret) {
kfree(ppgtt);
return ret;
if (!ppgtt) {
ret = -ENOMEM;
goto err;
}
if (ppgtt->base.allocate_va_range)
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret)
goto err_ppgtt;
if (ppgtt->base.allocate_va_range) {
ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
ppgtt->base.total);
if (ret) {
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
return ret;
if (ret)
goto err_ppgtt_cleanup;
}
ppgtt->base.clear_range(&ppgtt->base,
ppgtt->base.start,
ppgtt->base.total,
true);
ppgtt->base.total);
dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
@ -2770,6 +2843,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
}
return 0;
err_ppgtt_cleanup:
ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
kfree(ppgtt);
err:
drm_mm_remove_node(&ggtt->error_capture);
return ret;
}
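
The reworked i915_gem_init_ggtt() error handling above uses the usual goto-unwind layout: each later failure jumps to a label that tears down everything set up before it, ending with the newly reserved error_capture node. A trivial userspace illustration of that layout, with plain mallocs standing in for the reserved node and the aliasing ppgtt and a simulated failing step:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int later_step(void)
{
	return -EIO;		/* simulate a failure after both allocations */
}

static int setup(void)
{
	void *reserved, *ppgtt;
	int ret;

	reserved = malloc(64);		/* plays the reserved drm_mm node */
	if (!reserved)
		return -ENOMEM;

	ppgtt = malloc(128);		/* plays the aliasing ppgtt */
	if (!ppgtt) {
		ret = -ENOMEM;
		goto err;
	}

	ret = later_step();
	if (ret)
		goto err_ppgtt;

	/* success: a real init would keep both objects around */
	free(ppgtt);
	free(reserved);
	return 0;

err_ppgtt:
	free(ppgtt);
err:
	free(reserved);
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}
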
/**
@ -2788,6 +2869,9 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
i915_gem_cleanup_stolen(&dev_priv->drm);
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
@ -2895,7 +2979,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(ggtt->base.dev))
if (IS_BROXTON(to_i915(ggtt->base.dev)))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@ -3237,8 +3321,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
@ -3267,7 +3350,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);


@ -395,7 +395,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
@ -403,8 +403,7 @@ struct i915_address_space {
uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch);
uint64_t length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
uint64_t offset,
@ -450,6 +449,8 @@ struct i915_ggtt {
bool do_idle_maps;
int mtrr;
struct drm_mm_node error_capture;
};
struct i915_hw_ppgtt {


@ -72,9 +72,9 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
static int render_state_setup(struct render_state *so)
{
struct drm_device *dev = so->vma->vm->dev;
struct drm_i915_private *dev_priv = to_i915(so->vma->vm->dev);
const struct intel_renderstate_rodata *rodata = so->rodata;
const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
const bool has_64bit_reloc = INTEL_GEN(dev_priv) >= 8;
unsigned int i = 0, reloc_index = 0;
struct page *page;
u32 *d;
@ -115,7 +115,7 @@ static int render_state_setup(struct render_state *so)
so->aux_batch_offset = i * sizeof(u32);
if (HAS_POOLED_EU(dev)) {
if (HAS_POOLED_EU(dev_priv)) {
/*
* We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config


@ -256,10 +256,11 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
ret = intel_engine_idle(engine,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
@ -276,7 +277,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
}
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
intel_engine_init_seqno(engine, seqno);
return 0;


@ -182,8 +182,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
!is_vmalloc_addr(obj->mapping))
continue;
if ((flags & I915_SHRINK_ACTIVE) == 0 &&
i915_gem_object_is_active(obj))
if (!(flags & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
obj->framebuffer_references))
continue;
if (!can_release_pages(obj))


@ -115,7 +115,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) {
} else if (IS_I865G(dev_priv)) {
u32 tseg_size = 0;
u16 toud = 0;
u8 tmp;
@ -154,7 +154,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_845G(dev)) {
} else if (IS_845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_I830(dev)) {
} else if (IS_I830(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@ -204,7 +204,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
!IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@ -214,7 +215,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u64 ggtt_start, ggtt_end;
ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev))
if (IS_GEN4(dev_priv))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@ -270,7 +271,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev)) {
if (r == NULL && !IS_GEN3(dev_priv)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)ggtt->stolen_size);
base = 0;
@ -437,7 +438,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
case 3:
break;
case 4:
if (IS_G4X(dev))
if (IS_G4X(dev_priv))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@ -456,7 +457,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
break;
default:
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else


@ -62,6 +62,7 @@
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int tile_width;
/* Linear is always fine */
@ -71,8 +72,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode > I915_TILING_LAST)
return false;
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
if (IS_GEN2(dev_priv) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
tile_width = 128;
else
tile_width = 512;
@ -90,7 +91,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride > 8192)
return false;
if (IS_GEN3(dev)) {
if (IS_GEN3(dev_priv)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {


@ -28,6 +28,8 @@
*/
#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
@ -172,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
static bool compress_init(struct z_stream_s *zstream)
{
memset(zstream, 0, sizeof(*zstream));
zstream->workspace =
kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
GFP_ATOMIC | __GFP_NOWARN);
if (!zstream->workspace)
return false;
if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
kfree(zstream->workspace);
return false;
}
return true;
}
static int compress_page(struct z_stream_s *zstream,
void *src,
struct drm_i915_error_object *dst)
{
zstream->next_in = src;
zstream->avail_in = PAGE_SIZE;
do {
if (zstream->avail_out == 0) {
unsigned long page;
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
return -ENOMEM;
dst->pages[dst->page_count++] = (void *)page;
zstream->next_out = (void *)page;
zstream->avail_out = PAGE_SIZE;
}
if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
return -EIO;
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
if (0 && zstream->total_out > zstream->total_in)
return -E2BIG;
return 0;
}
static void compress_fini(struct z_stream_s *zstream,
struct drm_i915_error_object *dst)
{
if (dst) {
zlib_deflate(zstream, Z_FINISH);
dst->unused = zstream->avail_out;
}
zlib_deflateEnd(zstream);
kfree(zstream->workspace);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
err_puts(m, ":");
}
#else
static bool compress_init(struct z_stream_s *zstream)
{
return true;
}
static int compress_page(struct z_stream_s *zstream,
void *src,
struct drm_i915_error_object *dst)
{
unsigned long page;
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
return -ENOMEM;
dst->pages[dst->page_count++] =
memcpy((void *)page, src, PAGE_SIZE);
return 0;
}
static void compress_fini(struct z_stream_s *zstream,
struct drm_i915_error_object *dst)
{
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
err_puts(m, "~");
}
#endif
static void print_error_buffers(struct drm_i915_error_state_buf *m,
const char *name,
struct drm_i915_error_buffer *err,
@ -228,13 +334,57 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
return "unknown";
}
static void error_print_instdone(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
int slice;
int subslice;
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
ee->instdone.slice_common);
if (INTEL_GEN(m->i915) <= 6)
return;
for_each_instdone_slice_subslice(m->i915, slice, subslice)
err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.sampler[slice][subslice]);
for_each_instdone_slice_subslice(m->i915, slice, subslice)
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
}
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
struct drm_i915_error_request *erq)
{
if (!erq->seqno)
return;
err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
prefix, erq->pid,
erq->context, erq->seqno,
jiffies_to_msecs(jiffies - erq->jiffies),
erq->head, erq->tail);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x\n", ee->head);
err_printf(m, " TAIL: 0x%08x\n", ee->tail);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
ee->tail, ee->rq_post, ee->rq_tail);
err_printf(m, " CTL: 0x%08x\n", ee->ctl);
err_printf(m, " MODE: 0x%08x\n", ee->mode);
err_printf(m, " HWS: 0x%08x\n", ee->hws);
@ -242,7 +392,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
error_print_instdone(m, ee);
if (ee->batchbuffer) {
u64 start = ee->batchbuffer->gtt_offset;
u64 end = start + ee->batchbuffer->gtt_size;
@ -296,6 +448,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(ee->hangcheck_action),
ee->hangcheck_score);
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@ -307,28 +461,72 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
static int
ascii85_encode_len(int len)
{
return DIV_ROUND_UP(len, 4);
}
static bool
ascii85_encode(u32 in, char *out)
{
int i;
if (in == 0)
return false;
out[5] = '\0';
for (i = 5; i--; ) {
out[i] = '!' + in % 85;
in /= 85;
}
return true;
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine,
const char *name,
struct drm_i915_error_object *obj)
{
int page, offset, elt;
char out[6];
int page;
for (page = offset = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n", offset,
obj->pages[page][elt]);
offset += 4;
if (!obj)
return;
if (name) {
err_printf(m, "%s --- %s = 0x%08x %08x\n",
engine ? engine->name : "global", name,
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
}
err_compression_marker(m);
for (page = 0; page < obj->page_count; page++) {
int i, len;
len = PAGE_SIZE;
if (page == obj->page_count - 1)
len -= obj->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++) {
if (ascii85_encode(obj->pages[page][i], out))
err_puts(m, out);
else
err_puts(m, "z");
}
}
err_puts(m, "\n");
}
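Worth spelling out, since the dump format changes here: each object is now emitted as one run of ascii85 text after a ':' marker (zlib-compressed pages, CONFIG_DRM_I915_COMPRESS_ERROR) or a '~' marker (raw pages). Every non-zero 32-bit word becomes five characters offset from '!', most significant digit first, and an all-zero word is shortened to a lone 'z'. A minimal decoding sketch mirroring ascii85_encode() above — the helper name and calling convention are illustrative, and on the ':' path the recovered bytes are still a zlib deflate stream that needs a further inflate pass:

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative userspace counterpart to ascii85_encode(); only the
 * encoding itself ('z' for a zero word, otherwise five base-85 digits
 * offset from '!', most significant digit first) comes from the patch.
 */
static size_t ascii85_decode(const char *in, uint32_t *out, size_t max_words)
{
	size_t n = 0;

	while (*in && *in != '\n' && n < max_words) {
		uint32_t v = 0;
		int i;

		if (*in == 'z') {		/* shorthand for an all-zero word */
			out[n++] = 0;
			in++;
			continue;
		}

		for (i = 0; i < 5; i++)		/* five base-85 digits per word */
			v = v * 85 + (*in++ - '!');
		out[n++] = v;
	}

	return n;
}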
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
@ -339,8 +537,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt;
int max_hangcheck_score;
int i, j;
if (!error) {
err_printf(m, "no error state collected\n");
@ -391,7 +589,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
} else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
@ -402,10 +600,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
error->extra_instdone[i]);
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
@ -416,7 +610,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
if (IS_GEN7(dev))
if (IS_GEN7(dev_priv))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@ -438,7 +632,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
dev_priv->engine[j].name);
dev_priv->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@ -456,7 +650,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
err_puts(m, dev_priv->engine[i].name);
err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
ee->comm,
@ -464,37 +658,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
obj = ee->wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
dev_priv->engine[i].name,
dev_priv->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++) {
err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
ee->requests[j].pid,
ee->requests[j].seqno,
ee->requests[j].jiffies,
ee->requests[j].head,
ee->requests[j].tail);
}
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ", &ee->requests[j]);
}
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
dev_priv->engine[i].name);
dev_priv->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
dev_priv->engine[i].name,
dev_priv->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@ -504,77 +684,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
if ((obj = ee->ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
print_error_obj(m, dev_priv->engine[i],
"ringbuffer", ee->ringbuffer);
if ((obj = ee->hws_page)) {
u64 hws_offset = obj->gtt_offset;
u32 *hws_page = &obj->pages[0][0];
print_error_obj(m, dev_priv->engine[i],
"HW Status", ee->hws_page);
if (i915.enable_execlists) {
hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
}
err_printf(m, "%s --- HW Status = 0x%08llx\n",
dev_priv->engine[i].name, hws_offset);
offset = 0;
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
offset,
hws_page[elt],
hws_page[elt+1],
hws_page[elt+2],
hws_page[elt+3]);
offset += 16;
}
}
print_error_obj(m, dev_priv->engine[i],
"HW context", ee->ctx);
obj = ee->wa_ctx;
if (obj) {
u64 wa_ctx_offset = obj->gtt_offset;
u32 *wa_ctx_page = &obj->pages[0][0];
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
engine->wa_ctx.per_ctx.size);
print_error_obj(m, dev_priv->engine[i],
"WA context", ee->wa_ctx);
err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
dev_priv->engine[i].name, wa_ctx_offset);
offset = 0;
for (elt = 0; elt < wa_ctx_size; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
offset,
wa_ctx_page[elt + 0],
wa_ctx_page[elt + 1],
wa_ctx_page[elt + 2],
wa_ctx_page[elt + 3]);
offset += 16;
}
}
if ((obj = ee->ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
print_error_obj(m, dev_priv->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
}
if ((obj = error->semaphore)) {
err_printf(m, "Semaphore page = 0x%08x\n",
lower_32_bits(obj->gtt_offset));
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
elt * 4,
obj->pages[0][elt],
obj->pages[0][elt+1],
obj->pages[0][elt+2],
obj->pages[0][elt+3]);
}
}
print_error_obj(m, NULL, "Semaphores", error->semaphore);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@ -629,7 +755,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
return;
for (page = 0; page < obj->page_count; page++)
kfree(obj->pages[page]);
free_page((unsigned long)obj->pages[page]);
kfree(obj);
}
@ -667,104 +793,63 @@ static void i915_error_state_free(struct kref *error_ref)
}
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
i915_error_object_create(struct drm_i915_private *i915,
struct i915_vma *vma)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *src;
struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
int num_pages;
bool use_ggtt;
int i = 0;
u64 reloc_offset;
struct z_stream_s zstream;
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
if (!vma)
return NULL;
src = vma->obj;
if (!src->pages)
return NULL;
num_pages = src->base.size >> PAGE_SHIFT;
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
GFP_ATOMIC | __GFP_NOWARN);
if (!dst)
return NULL;
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
dst->page_count = 0;
dst->unused = 0;
reloc_offset = dst->gtt_offset;
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
(vma->flags & I915_VMA_GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
if (!(vma->flags & I915_VMA_GLOBAL_BIND))
goto unwind;
reloc_offset = vma->node.start;
if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind;
if (!compress_init(&zstream)) {
kfree(dst);
return NULL;
}
/* Cannot access snooped pages through the aperture */
if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
!HAS_LLC(dev_priv))
goto unwind;
for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s;
int ret;
dst->page_count = num_pages;
while (num_pages--) {
unsigned long flags;
void *d;
ggtt->base.insert_page(&ggtt->base, dma, slot,
I915_CACHE_NONE, 0);
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
ret = compress_page(&zstream, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
if (ret)
goto unwind;
local_irq_save(flags);
if (use_ggtt) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
* It's part of the error state, and this hopefully
* captures what the GPU read.
*/
s = io_mapping_map_atomic_wc(&ggtt->mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
struct page *page;
void *s;
page = i915_gem_object_get_page(src, i);
drm_clflush_pages(&page, 1);
s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
dst->pages[i++] = d;
reloc_offset += PAGE_SIZE;
}
return dst;
goto out;
unwind:
while (i--)
kfree(dst->pages[i]);
while (dst->page_count--)
free_page((unsigned long)dst->pages[dst->page_count]);
kfree(dst);
return NULL;
dst = NULL;
out:
compress_fini(&zstream, dst);
ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
return dst;
}
/* The error capture is special as tries to run underneath the normal
@ -855,7 +940,8 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
if (engine_id)
*engine_id = i;
return error->engine[i].ipehr ^ error->engine[i].instdone;
return error->engine[i].ipehr ^
error->engine[i].instdone.instdone;
}
}
@ -891,7 +977,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
if (!error->semaphore)
return;
for_each_engine_id(to, dev_priv, id) {
for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
@ -998,7 +1084,6 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
@ -1010,9 +1095,10 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(DMA_FADD_I8XX);
ee->ipeir = I915_READ(IPEIR);
ee->ipehr = I915_READ(IPEHR);
ee->instdone = I915_READ(GEN2_INSTDONE);
}
intel_engine_get_instdone(engine, &ee->instdone);
ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
@ -1079,6 +1165,20 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
}
}
static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
erq->seqno = request->fence.seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
erq->tail = request->tail;
rcu_read_lock();
erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
rcu_read_unlock();
}
static void engine_record_requests(struct intel_engine_cs *engine,
struct drm_i915_gem_request *first,
struct drm_i915_error_engine *ee)
@ -1102,8 +1202,6 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
list_for_each_entry_from(request, &engine->request_list, link) {
struct drm_i915_error_request *erq;
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@ -1123,19 +1221,22 @@ static void engine_record_requests(struct intel_engine_cs *engine,
break;
}
erq = &ee->requests[count++];
erq->seqno = request->fence.seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
erq->tail = request->tail;
rcu_read_lock();
erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
rcu_read_unlock();
record_request(request, &ee->requests[count++]);
}
ee->num_requests = count;
}
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
unsigned int n;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
if (engine->execlist_port[n].request)
record_request(engine->execlist_port[n].request,
&ee->execlist[n]);
}
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
@ -1146,20 +1247,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
if (!intel_engine_initialized(engine))
if (!engine)
continue;
ee->engine_id = i;
error_record_engine_registers(error, engine, ee);
error_record_engine_waiters(engine, ee);
error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
if (request) {
@ -1202,6 +1304,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
ee->rq_head = request->head;
ee->rq_post = request->postfix;
ee->rq_tail = request->tail;
ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
@ -1318,13 +1424,13 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
*/
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv)) {
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
if (IS_GEN7(dev))
if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
if (INTEL_INFO(dev)->gen >= 8) {
@ -1332,7 +1438,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
if (IS_GEN6(dev)) {
if (IS_GEN6(dev_priv)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
@ -1349,7 +1455,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 3: Feature specific registers */
if (IS_GEN6(dev) || IS_GEN7(dev)) {
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
@ -1362,18 +1468,16 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
} else if (HAS_PCH_SPLIT(dev)) {
} else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
} else if (IS_GEN2(dev)) {
} else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
} else if (!IS_VALLEYVIEW(dev)) {
} else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
@ -1418,6 +1522,27 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
static int capture(void *data)
{
struct drm_i915_error_state *error = data;
i915_capture_gen_state(error->i915, error);
i915_capture_reg_state(error->i915, error);
i915_gem_record_fences(error->i915, error);
i915_gem_record_rings(error->i915, error);
i915_capture_active_buffers(error->i915, error);
i915_capture_pinned_buffers(error->i915, error);
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
return 0;
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@ -1435,6 +1560,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error;
unsigned long flags;
if (!i915.error_capture)
return;
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
@ -1446,18 +1574,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
kref_init(&error->ref);
error->i915 = dev_priv;
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_record_fences(dev_priv, error);
i915_gem_record_rings(dev_priv, error);
i915_capture_active_buffers(dev_priv, error);
i915_capture_pinned_buffers(dev_priv, error);
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev_priv);
error->display = intel_display_capture_error_state(dev_priv);
stop_machine(capture, error, NULL);
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@ -1476,7 +1595,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
return;
}
if (!warned) {
if (!warned &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
@ -1497,7 +1617,6 @@ void i915_error_state_get(struct drm_device *dev,
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
@ -1519,33 +1638,3 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT";
default: return "";
}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
uint32_t *instdone)
{
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE);
else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1);
} else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
}
}


@ -917,6 +917,7 @@ static void guc_addon_create(struct intel_guc *guc)
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct page *page;
u32 size;
@ -944,10 +945,10 @@ static void guc_addon_create(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
engine = &dev_priv->engine[RCS];
engine = dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.ggtt_offset;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
@ -960,7 +961,7 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
@ -1014,9 +1015,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct drm_i915_gem_request *request;
struct i915_guc_client *client;
struct intel_engine_cs *engine;
struct drm_i915_gem_request *request;
enum intel_engine_id id;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
@ -1033,7 +1035,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
/* Replay the current set of previously submitted requests */


@ -1058,8 +1058,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
@ -1257,20 +1258,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
notify_ring(&dev_priv->engine[BCS]);
notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@ -1340,21 +1341,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
gen8_cs_irq_handler(&dev_priv->engine[RCS],
gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[BCS],
gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
gen8_cs_irq_handler(&dev_priv->engine[VCS],
gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[VCS2],
gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
gen8_cs_irq_handler(&dev_priv->engine[VECS],
gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
@ -1598,7 +1599,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VECS]);
notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@ -2551,92 +2552,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
static inline void
i915_err_print_instdone(struct drm_i915_private *dev_priv,
struct intel_instdone *instdone)
{
uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
int pipe, i;
int slice;
int subslice;
if (!eir)
pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
if (INTEL_GEN(dev_priv) <= 3)
return;
pr_err("render error detected, EIR: 0x%08x\n", eir);
pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
i915_get_extra_instdone(dev_priv, instdone);
if (INTEL_GEN(dev_priv) <= 6)
return;
if (IS_G4X(dev_priv)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
for_each_instdone_slice_subslice(dev_priv, slice, subslice)
pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice, instdone->sampler[slice][subslice]);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
for_each_instdone_slice_subslice(dev_priv, slice, subslice)
pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice, instdone->row[slice][subslice]);
}
if (!IS_GEN2(dev_priv)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
u32 eir;
if (eir & I915_ERROR_MEMORY_REFRESH) {
pr_err("memory refresh error:\n");
for_each_pipe(dev_priv, pipe)
pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
pr_err("instruction error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
if (INTEL_GEN(dev_priv) < 4) {
u32 ipeir = I915_READ(IPEIR);
if (!IS_GEN2(dev_priv))
I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
if (INTEL_GEN(dev_priv) < 4)
I915_WRITE(IPEIR, I915_READ(IPEIR));
else
I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
}
I915_WRITE(EIR, eir);
POSTING_READ(EIR);
I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
@ -2665,7 +2626,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
va_end(args);
i915_capture_error_state(dev_priv, engine_mask, error_msg);
i915_report_and_clear_eir(dev_priv);
i915_clear_error_registers(dev_priv);
if (!engine_mask)
return;
@ -2694,18 +2655,26 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@ -2715,8 +2684,8 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ilk_enable_display_irq(dev_priv, bit);
@ -2725,19 +2694,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@ -2753,14 +2709,23 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_STATUS |
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@ -2769,25 +2734,14 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@ -2816,9 +2770,10 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
enum intel_engine_id id;
if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv) {
for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
@ -2828,7 +2783,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
for_each_engine(signaller, dev_priv) {
for_each_engine(signaller, dev_priv, id) {
if(engine == signaller)
continue;
@ -2949,35 +2904,52 @@ static int semaphore_passed(struct intel_engine_cs *engine)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
engine->hangcheck.deadlock = 0;
}
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
u32 tmp = current_instdone | *old_instdone;
bool unchanged;
unchanged = tmp == *old_instdone;
*old_instdone |= tmp;
return unchanged;
}
static bool subunits_stuck(struct intel_engine_cs *engine)
{
u32 instdone[I915_NUM_INSTDONE_REG];
struct drm_i915_private *dev_priv = engine->i915;
struct intel_instdone instdone;
struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
bool stuck;
int i;
int slice;
int subslice;
if (engine->id != RCS)
return true;
i915_get_extra_instdone(engine->i915, instdone);
intel_engine_get_instdone(engine, &instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
* accumulating the undone -> done transitions and only
* consider those as progress.
*/
stuck = true;
for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
stuck = instdone_unchanged(instdone.instdone,
&accu_instdone->instdone);
stuck &= instdone_unchanged(instdone.slice_common,
&accu_instdone->slice_common);
if (tmp != engine->hangcheck.instdone[i])
stuck = false;
engine->hangcheck.instdone[i] |= tmp;
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
&accu_instdone->sampler[slice][subslice]);
stuck &= instdone_unchanged(instdone.row[slice][subslice],
&accu_instdone->row[slice][subslice]);
}
return stuck;
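The new accumulation scheme in subunits_stuck() is easiest to see with a concrete trace: instdone_unchanged() folds every sample into an accumulator and only reports "unchanged" when the sample adds no previously-unseen bit. A standalone sketch (the harness and the sample values are hypothetical; the helper body matches the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as instdone_unchanged() above, lifted out so it can run standalone. */
static bool instdone_unchanged(uint32_t current_instdone, uint32_t *old_instdone)
{
	uint32_t tmp = current_instdone | *old_instdone;
	bool unchanged = tmp == *old_instdone;

	*old_instdone |= tmp;
	return unchanged;
}

int main(void)
{
	uint32_t accu = 0;

	printf("%d\n", instdone_unchanged(0x0f, &accu)); /* 0: first sample, new bits, counts as progress */
	printf("%d\n", instdone_unchanged(0x03, &accu)); /* 1: nothing new over 0x0f, looks stuck */
	printf("%d\n", instdone_unchanged(0x13, &accu)); /* 0: bit 4 newly set, progress again */
	return 0;
}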
@ -2989,7 +2961,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
if (acthd != engine->hangcheck.acthd) {
/* Clear subunit states on head movement */
memset(engine->hangcheck.instdone, 0,
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
return HANGCHECK_ACTIVE;
@ -3061,6 +3033,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
#define BUSY 1
@ -3080,7 +3053,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
@ -3159,7 +3132,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
/* Clear head and subunit states on seqno movement */
acthd = 0;
memset(engine->hangcheck.instdone, 0,
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}
@ -3197,12 +3170,12 @@ static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_NOP(dev))
if (HAS_PCH_NOP(dev_priv))
return;
GEN5_IRQ_RESET(SDE);
if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
}
@ -3218,7 +3191,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_NOP(dev))
if (HAS_PCH_NOP(dev_priv))
return;
WARN_ON(I915_READ(SDEIER) != 0);
@ -3293,7 +3266,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
GEN5_IRQ_RESET(DE);
if (IS_GEN7(dev))
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
gen5_gt_irq_reset(dev);
@ -3343,7 +3316,7 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev);
}
@ -3532,10 +3505,10 @@ static void ibx_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
if (HAS_PCH_NOP(dev))
if (HAS_PCH_NOP(dev_priv))
return;
if (HAS_PCH_IBX(dev))
if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
@ -3552,14 +3525,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
if (HAS_L3_DPF(dev)) {
if (HAS_L3_DPF(dev_priv)) {
/* L3 parity interrupt is always unmasked. */
dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
gt_irqs |= GT_PARITY_ERROR(dev);
dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
gt_irqs |= GT_PARITY_ERROR(dev_priv);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (IS_GEN5(dev)) {
if (IS_GEN5(dev_priv)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@ -3616,7 +3589,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_postinstall(dev);
if (IS_IRONLAKE_M(dev)) {
if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
@ -3756,13 +3729,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@ -3971,7 +3944,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@ -4168,7 +4141,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@ -4400,9 +4373,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@ -4539,16 +4512,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
@ -4557,13 +4530,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
@ -4577,21 +4550,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
}


@ -47,6 +47,7 @@ struct i915_params i915 __read_mostly = {
.load_detect_test = 0,
.force_reset_modeset_test = 0,
.reset = true,
.error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
@ -115,6 +116,14 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
module_param_named(error_capture, i915.error_capture, bool, 0600);
MODULE_PARM_DESC(error_capture,
"Record the GPU state following a hang. "
"This information in /sys/class/drm/card<N>/error is vital for "
"triaging and debugging hangs.");
#endif
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "


@ -59,6 +59,7 @@ struct i915_params {
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;


@ -431,9 +431,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
extern int i915_driver_load(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
@ -463,8 +460,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915_driver_load(pdev, ent);
}
extern void i915_driver_unload(struct drm_device *dev);
static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@ -473,8 +468,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
drm_dev_unref(dev);
}
extern const struct dev_pm_ops i915_pm_ops;
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,


@ -86,8 +86,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
#define BSM 0x5c
#define BSM_MASK (0xFFFF << 20)
/* BSM in include/drm/i915_drm.h */
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
@ -1605,6 +1604,7 @@ enum skl_disp_power_wells {
#define RING_HEAD(base) _MMIO((base)+0x34)
#define RING_START(base) _MMIO((base)+0x38)
#define RING_CTL(base) _MMIO((base)+0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_SYNC_0(base) _MMIO((base)+0x40)
#define RING_SYNC_1(base) _MMIO((base)+0x44)
#define RING_SYNC_2(base) _MMIO((base)+0x48)
@ -1708,7 +1708,11 @@ enum skl_disp_power_wells {
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
#define I915_NUM_INSTDONE_REG 4
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
@ -2089,9 +2093,9 @@ enum skl_disp_power_wells {
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
#define GT_PARITY_ERROR(dev) \
#define GT_PARITY_ERROR(dev_priv) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
(IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
(IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
@ -7327,6 +7331,10 @@ enum {
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
#define AUD_CONFIG_N(n) \
(((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
(((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)


@ -38,7 +38,7 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
@ -54,7 +54,7 @@ static void i915_restore_display(struct drm_device *dev)
intel_fbc_global_disable(dev_priv);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
i915_redisable_vga(dev);
@ -70,7 +70,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
if (IS_GEN4(dev))
if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@ -116,7 +116,7 @@ int i915_restore_state(struct drm_device *dev)
i915_gem_restore_fences(dev);
if (IS_GEN4(dev))
if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);


@ -514,6 +514,8 @@ static const struct attribute *vlv_attrs[] = {
NULL,
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@ -571,6 +573,21 @@ static struct bin_attribute error_state_attr = {
.write = error_state_write,
};
static void i915_setup_error_capture(struct device *kdev)
{
if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
DRM_ERROR("error_state sysfs setup failed\n");
}
static void i915_teardown_error_capture(struct device *kdev)
{
sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@ -617,17 +634,15 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&kdev->kobj,
&error_state_attr);
if (ret)
DRM_ERROR("error_state sysfs setup failed\n");
i915_setup_error_capture(kdev);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
i915_teardown_error_capture(kdev);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sysfs_remove_files(&kdev->kobj, vlv_attrs);
else


@ -81,7 +81,7 @@ static const struct {
int clock;
int n;
int cts;
} aud_ncts[] = {
} hdmi_aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
@ -121,45 +121,20 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
if ((rate == aud_ncts[i].sample_rate) &&
(mode->clock == aud_ncts[i].clock)) {
return aud_ncts[i].n;
for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
if (rate == hdmi_aud_ncts[i].sample_rate &&
adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
return hdmi_aud_ncts[i].n;
}
}
return 0;
}
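
Editor's note, not part of the patch: a quick sanity check of the values audio_config_hdmi_get_n() returns, assuming the standard HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS. For the 44.1 kHz / 297 MHz entry (N = 4704, CTS = 247500): 297,000,000 * 4704 / 247,500 = 5,644,800, and 5,644,800 / 128 = 44,100 Hz, which matches the sample rate. The relation comes from the HDMI specification, not from anything stated in this patch.
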
static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
{
int n_low, n_up;
uint32_t tmp = val;
n_low = n & 0xfff;
n_up = (n >> 12) & 0xff;
tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
(n_low << AUD_CONFIG_LOWER_N_SHIFT) |
AUD_CONFIG_N_PROG_ENABLE);
return tmp;
}
/* check whether N/CTS/M need be set manually */
static bool audio_rate_need_prog(struct intel_crtc *crtc,
const struct drm_display_mode *mode)
{
if (((mode->clock == TMDS_297M) ||
(mode->clock == TMDS_296M)) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
return true;
else
return false;
}
static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_eldv, uint32_t bits_eldv,
i915_reg_t reg_elda, uint32_t bits_elda,
@ -245,6 +220,65 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
static void
hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
}
static void
hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
int rate = acomp ? acomp->aud_sample_rate[port] : 0;
enum pipe pipe = intel_crtc->pipe;
int n;
u32 tmp;
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
if (adjusted_mode->crtc_clock == TMDS_296M ||
adjusted_mode->crtc_clock == TMDS_297M) {
n = audio_config_hdmi_get_n(adjusted_mode, rate);
if (n != 0) {
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
DRM_DEBUG_KMS("no suitable N value is found\n");
}
}
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
}
static void
hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
{
if (intel_crtc_has_dp_encoder(intel_crtc->config))
hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
else
hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
}
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -276,20 +310,16 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
}
static void hsw_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct i915_audio_component *acomp = dev_priv->audio_component;
enum port port = intel_encoder->port;
const uint8_t *eld = connector->eld;
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
enum port port = intel_dig_port->port;
uint32_t tmp;
int len, i;
int n, rate;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
@ -325,42 +355,17 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
if (!acomp)
rate = 0;
else if (port >= PORT_A && port <= PORT_E)
rate = acomp->aud_sample_rate[port];
else {
DRM_ERROR("invalid port: %d\n", port);
rate = 0;
}
n = audio_config_get_n(adjusted_mode, rate);
if (n != 0)
tmp = audio_config_setup_n_reg(n, tmp);
else
DRM_DEBUG_KMS("no suitable N value is found\n");
}
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
hsw_audio_config_update(intel_crtc, port, adjusted_mode);
mutex_unlock(&dev_priv->av_mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum port port = enc_to_dig_port(&encoder->base)->port;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port = intel_encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@ -400,13 +405,13 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
}
static void ilk_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum port port = enc_to_dig_port(&encoder->base)->port;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port = intel_encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@ -425,13 +430,13 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
* infrastructure is not there yet.
*/
if (HAS_PCH_IBX(connector->dev)) {
if (HAS_PCH_IBX(dev_priv)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(connector->dev) ||
IS_CHERRYVIEW(connector->dev)) {
} else if (IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@ -490,11 +495,10 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
enum port port = intel_encoder->port;
enum pipe pipe = crtc->pipe;
connector = drm_select_eld(encoder);
if (!connector)
@ -518,13 +522,19 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
adjusted_mode);
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = connector;
intel_encoder->audio_connector = connector;
/* referenced in audio callbacks */
dev_priv->dig_port_map[port] = intel_encoder;
dev_priv->av_enc_map[pipe] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
pipe = -1;
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
}
/**
@ -537,22 +547,27 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
enum port port = intel_encoder->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = NULL;
dev_priv->dig_port_map[port] = NULL;
intel_encoder->audio_connector = NULL;
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
pipe = -1;
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
}
/**
@ -627,74 +642,67 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
static int i915_audio_component_sync_audio_rate(struct device *kdev,
int port, int rate)
static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
int port, int pipe)
{
if (WARN_ON(pipe >= I915_MAX_PIPES))
return NULL;
/* MST */
if (pipe >= 0)
return dev_priv->av_enc_map[pipe];
/* Non-MST */
for_each_pipe(dev_priv, pipe) {
struct intel_encoder *encoder;
encoder = dev_priv->av_enc_map[pipe];
if (encoder == NULL)
continue;
if (port == encoder->port)
return encoder;
}
return NULL;
}
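
Editor's note, not part of the patch: the lookup rule in get_saved_enc() is that MST callers pass a valid pipe, which indexes av_enc_map[] directly, while non-MST callers pass pipe = -1 and the map is scanned for a matching port. The toy user-space sketch below reproduces that rule with hypothetical names, purely to make the two call paths easy to see.

/* Toy model of the get_saved_enc() lookup rule; names are made up. */
#include <stdio.h>

#define TOY_MAX_PIPES 3

struct toy_encoder { int port; };

static struct toy_encoder *toy_get_saved_enc(struct toy_encoder *map[TOY_MAX_PIPES],
					     int port, int pipe)
{
	if (pipe >= TOY_MAX_PIPES)
		return NULL;
	if (pipe >= 0)				/* MST: pipe identifies the stream */
		return map[pipe];
	for (pipe = 0; pipe < TOY_MAX_PIPES; pipe++) {	/* non-MST: match the port */
		if (map[pipe] && map[pipe]->port == port)
			return map[pipe];
	}
	return NULL;
}

int main(void)
{
	struct toy_encoder enc_b = { .port = 1 };	/* stands in for PORT_B */
	struct toy_encoder *map[TOY_MAX_PIPES] = { NULL, &enc_b, NULL };

	printf("%p\n", (void *)toy_get_saved_enc(map, 1, -1));	/* non-MST: finds enc_b */
	printf("%p\n", (void *)toy_get_saved_enc(map, 1, 1));	/* MST: map[1] directly */
	return 0;
}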
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
struct drm_display_mode *mode;
struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
enum pipe pipe = INVALID_PIPE;
u32 tmp;
int n;
int err = 0;
/* HSW, BDW, SKL, KBL need this fix */
if (!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv) &&
!IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv))
if (!HAS_DDI(dev_priv))
return 0;
i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
intel_encoder = dev_priv->dig_port_map[port];
/* intel_encoder might be NULL for DP MST */
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
intel_encoder->type != INTEL_OUTPUT_HDMI) {
DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
if (pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
DRM_DEBUG_KMS("pipe %c connects port %c\n",
pipe_name(pipe), port_name(port));
mode = &crtc->config->base.adjusted_mode;
/* pipe passed from the audio driver will be -1 for Non-MST case */
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
adjusted_mode = &crtc->config->base.adjusted_mode;
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
/* 2. check whether to set the N/CTS/M manually or not */
if (!audio_rate_need_prog(crtc, mode)) {
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
goto unlock;
}
n = audio_config_get_n(mode, rate);
if (n == 0) {
DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
port_name(port));
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
goto unlock;
}
/* 3. set the N/CTS/M */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp = audio_config_setup_n_reg(n, tmp);
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
hsw_audio_config_update(crtc, port, adjusted_mode);
unlock:
mutex_unlock(&dev_priv->av_mutex);
@ -703,27 +711,29 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
bool *enabled,
int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
intel_encoder = dev_priv->dig_port_map[port];
/* intel_encoder might be NULL for DP MST */
if (intel_encoder) {
ret = 0;
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
*enabled = intel_dig_port->audio_connector != NULL;
if (*enabled) {
eld = intel_dig_port->audio_connector->eld;
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
mutex_unlock(&dev_priv->av_mutex);
return ret;
}
ret = 0;
*enabled = intel_encoder->audio_connector != NULL;
if (*enabled) {
eld = intel_encoder->audio_connector->eld;
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
mutex_unlock(&dev_priv->av_mutex);


@ -996,6 +996,10 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
@ -1031,6 +1035,77 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum port p;
if (!info->alternate_ddc_pin)
return;
for_each_port_masked(p, (1 << port) - 1) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
if (info->alternate_ddc_pin != i->alternate_ddc_pin)
continue;
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
port_name(p), i->alternate_ddc_pin,
port_name(port), port_name(p));
/*
* If we have multiple ports supposedly sharing the
* pin, then dvi/hdmi couldn't exist on the shared
* port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
* Due to parsing the ports in alphabetical order,
* a higher port will always clobber a lower one.
*/
i->supports_dvi = false;
i->supports_hdmi = false;
i->alternate_ddc_pin = 0;
}
}
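
Editor's note, not part of the patch: as a concrete example of the mask walk above, if the VBT gives PORT_E alternate_ddc_pin == DDC_PIN_B, then for_each_port_masked(p, (1 << port) - 1) iterates over ports A through D (assuming PORT_E is 4 in the port enum, (1 << 4) - 1 = 0xf); PORT_B is found to share the pin and loses its DVI/HDMI support, which matches what the removed PORT_E-specific code further down in this diff used to hard-code.
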
static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum port p;
if (!info->alternate_aux_channel)
return;
for_each_port_masked(p, (1 << port) - 1) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
if (info->alternate_aux_channel != i->alternate_aux_channel)
continue;
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
port_name(p), i->alternate_aux_channel,
port_name(port), port_name(p));
/*
* If we have multiple ports supposedly sharing the
* aux channel, then DP couldn't exist on the shared
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
* Due to parsing the ports in alphabetical order,
* a higher port will always clobber a lower one.
*/
i->supports_dp = false;
i->alternate_aux_channel = 0;
}
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@ -1105,54 +1180,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
if (port == PORT_E) {
info->alternate_ddc_pin = ddc_pin;
/* if DDIE share ddc pin with other port, then
* dvi/hdmi couldn't exist on the shared port.
* Otherwise they share the same ddc bin and system
* couldn't communicate with them seperately. */
if (ddc_pin == DDC_PIN_B) {
dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
} else if (ddc_pin == DDC_PIN_C) {
dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
} else if (ddc_pin == DDC_PIN_D) {
dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
}
} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
else if (ddc_pin == DDC_PIN_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
else if (ddc_pin == DDC_PIN_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
info->alternate_ddc_pin = ddc_pin;
sanitize_ddc_pin(dev_priv, port);
}
if (is_dp) {
if (port == PORT_E) {
info->alternate_aux_channel = aux_channel;
/* if DDIE share aux channel with other port, then
* DP couldn't exist on the shared port. Otherwise
* they share the same aux channel and system
* couldn't communicate with them seperately. */
if (aux_channel == DP_AUX_A)
dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
else if (aux_channel == DP_AUX_B)
dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
else if (aux_channel == DP_AUX_C)
dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
else if (aux_channel == DP_AUX_D)
dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
}
else if (aux_channel == DP_AUX_A && port != PORT_A)
DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
else if (aux_channel == DP_AUX_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
else if (aux_channel == DP_AUX_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
else if (aux_channel == DP_AUX_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
info->alternate_aux_channel = aux_channel;
sanitize_aux_ch(dev_priv, port);
}
if (bdb->version >= 158) {
@ -1759,3 +1795,52 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
return false;
}
/**
* intel_bios_is_lspcon_present - if LSPCON is attached on %port
* @dev_priv: i915 device instance
* @port: port to check
*
* Return true if LSPCON is present on this port
*/
bool
intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port)
{
int i;
if (!HAS_LSPCON(dev_priv))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
if (!dev_priv->vbt.child_dev[i].common.lspcon)
continue;
switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
return true;
break;
case DVO_PORT_DPB:
case DVO_PORT_HDMIB:
if (port == PORT_B)
return true;
break;
case DVO_PORT_DPC:
case DVO_PORT_HDMIC:
if (port == PORT_C)
return true;
break;
case DVO_PORT_DPD:
case DVO_PORT_HDMID:
if (port == PORT_D)
return true;
break;
default:
break;
}
}
return false;
}


@ -621,6 +621,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
/* To avoid the task_struct disappearing beneath us as we wake up
@ -628,7 +629,7 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
* RCU lock, i.e. as we call wake_up_process() we must be holding the
* rcu_read_lock().
*/
for_each_engine(engine, i915)
for_each_engine(engine, i915, id)
if (unlikely(intel_engine_wakeup(engine)))
mask |= intel_engine_flag(engine);
@ -638,9 +639,10 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
for_each_engine(engine, i915) {
for_each_engine(engine, i915, id) {
if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
wake_up_process(engine->breadcrumbs.signaler);
mask |= intel_engine_flag(engine);


@ -273,7 +273,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
int i;
if (HAS_GMCH_DISPLAY(dev)) {
if (HAS_GMCH_DISPLAY(dev_priv)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@ -288,7 +288,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
if (HAS_GMCH_DISPLAY(dev))
if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@ -297,7 +297,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
for (i = 0; i < 256; i++) {
uint32_t word = (i << 16) | (i << 8) | i;
if (HAS_GMCH_DISPLAY(dev))
if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@ -326,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
@ -534,14 +534,14 @@ void intel_color_init(struct drm_crtc *crtc)
drm_mode_crtc_set_gamma_size(crtc, 256);
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
} else if (IS_HASWELL(dev)) {
} else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
} else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
} else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else {


@ -84,7 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & ADPA_DAC_ENABLE))
goto out;
if (HAS_PCH_CPT(dev))
if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@ -165,16 +165,16 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_LPT(dev))
if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
else if (HAS_PCH_CPT(dev_priv))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
@ -241,7 +241,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
int max_dotclk = to_i915(dev)->max_dotclk_freq;
struct drm_i915_private *dev_priv = to_i915(dev);
int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@ -250,15 +251,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
if (HAS_PCH_LPT(dev))
if (HAS_PCH_LPT(dev_priv))
max_clock = 180000;
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev_priv))
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
else if (IS_GEN3(dev) || IS_GEN4(dev))
else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
max_clock = 400000;
else
max_clock = 350000;
@ -269,7 +270,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev) &&
if (HAS_PCH_LPT(dev_priv) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
@ -280,13 +281,13 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev)) {
if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
@ -296,7 +297,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
pipe_config->port_clock = 135000 * 2;
return true;
@ -312,7 +313,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
bool turn_off_dac = HAS_PCH_SPLIT(dev);
bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
crt->force_hotplug_required = 0;
@ -419,10 +420,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
bool ret = false;
int i, tries = 0;
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
return intel_ironlake_crt_detect_hotplug(connector);
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
/*
@ -430,7 +431,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
if (IS_G4X(dev) && !IS_GM45(dev))
if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
tries = 2;
else
tries = 1;
@ -566,7 +567,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
if (!IS_GEN2(dev)) {
if (!IS_GEN2(dev_priv)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
@ -643,6 +644,32 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
return status;
}
static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
return 1;
}
static const struct dmi_system_id intel_spurious_crt_detect[] = {
{
.callback = intel_spurious_crt_detect_dmi_callback,
.ident = "ACER ZGB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
},
{
.callback = intel_spurious_crt_detect_dmi_callback,
.ident = "Intel DZ77BH-55K",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
},
},
{ }
};
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
@ -659,6 +686,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
connector->base.id, connector->name,
force);
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@ -740,7 +771,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
if (ret || !IS_G4X(dev_priv))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@ -808,32 +839,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
static const struct dmi_system_id intel_no_crt[] = {
{
.callback = intel_no_crt_dmi_callback,
.ident = "ACER ZGB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
},
{
.callback = intel_no_crt_dmi_callback,
.ident = "DELL XPS 8700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
},
},
{ }
};
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@ -843,13 +848,9 @@ void intel_crt_init(struct drm_device *dev)
i915_reg_t adpa_reg;
u32 adpa;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
return;
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev_priv))
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@ -893,12 +894,12 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev))
if (IS_I830(dev_priv))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev))
if (IS_GEN2(dev_priv))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
@ -907,20 +908,23 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = adpa_reg;
crt->base.compute_config = intel_crt_compute_config;
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev))
if (I915_HAS_HOTPLUG(dev) &&
!dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev)) {
if (HAS_DDI(dev_priv)) {
crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.post_disable = hsw_post_disable_crt;
} else {
crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
@ -941,7 +945,7 @@ void intel_crt_init(struct drm_device *dev)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
if (HAS_PCH_LPT(dev)) {
if (HAS_PCH_LPT(dev_priv)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;


@ -167,8 +167,47 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x80005012, 0x000000C0, 0x3 },
};
/* Kabylake H and S */
static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x00000097, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 },
};
/* Kabylake U */
static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A1, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x80007011, 0x000000CD, 0x3 },
{ 0x80009010, 0x000000C0, 0x3 },
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
{ 0x80007011, 0x000000C0, 0x3 },
{ 0x00002016, 0x0000004F, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
};
/* Kabylake Y */
static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
{ 0x00001017, 0x000000A1, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x80007011, 0x000000CD, 0x3 },
{ 0x8000800F, 0x000000C0, 0x3 },
{ 0x00001017, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
{ 0x80007011, 0x000000C0, 0x3 },
{ 0x00001017, 0x0000004C, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
};
/*
* Skylake H and S
* Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@ -185,7 +224,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
};
/*
* Skylake U
* Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
@ -202,7 +241,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
};
/*
* Skylake Y
* Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
@ -218,7 +257,7 @@ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x0000008A, 0x0 },
};
/* Skylake U, H and S */
/* Skylake/Kabylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
@ -233,7 +272,7 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x80000018, 0x000000C0, 0x1 },
};
/* Skylake Y */
/* Skylake/Kabylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
@ -334,10 +373,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
} else if (IS_SKL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
@ -346,6 +385,21 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
return kbl_u_ddi_translations_dp;
} else {
*n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
return kbl_ddi_translations_dp;
}
}
static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@ -362,7 +416,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
return skl_get_buf_trans_dp(dev_priv, n_entries);
if (IS_KABYLAKE(dev_priv))
return kbl_get_buf_trans_dp(dev_priv, n_entries);
else
return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
@ -430,21 +487,18 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
return;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
} else if (IS_SKYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
n_edp_entries > 9))
n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@ -464,6 +518,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
n_edp_entries > 9))
n_edp_entries = 9;
}
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
@ -1020,13 +1085,13 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_INFO(dev)->gen <= 8)
if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@ -1081,14 +1146,14 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@ -1189,7 +1254,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) &&
if (IS_HASWELL(dev_priv) &&
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
@ -1434,7 +1499,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
if (IS_KABYLAKE(dev_priv))
ddi_translations = kbl_get_buf_trans_dp(dev_priv,
&n_entries);
else
ddi_translations = skl_get_buf_trans_dp(dev_priv,
&n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
@ -1742,7 +1812,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
@ -2438,7 +2508,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp;
bool init_hdmi, init_dp, init_lspcon = false;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
@ -2470,6 +2540,19 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
* Lspcon device needs to be driven with DP connector
* with special detection sequence. So make sure DP
* is initialized before lspcon.
*/
init_dp = true;
init_lspcon = true;
init_hdmi = false;
DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
}
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
@ -2509,7 +2592,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
if (IS_BROXTON(dev) && port == PORT_A) {
if (IS_BROXTON(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
@ -2520,6 +2603,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@ -2532,7 +2616,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
@ -2545,6 +2629,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
goto err;
}
if (init_lspcon) {
if (lspcon_init(intel_dig_port))
/* TODO: handle hdmi info frame part */
DRM_DEBUG_KMS("LSPCON init success on port %c\n",
port_name(port));
else
/*
* LSPCON init failed, but DP init succeeded, so
* let's try to drive it as a DP++ port.
*/
DRM_ERROR("LSPCON init failed on port %c\n",
port_name(port));
}
return;
err:


@ -28,20 +28,14 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
info->gen,
dev_priv->drm.pdev->device,
dev_priv->drm.pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
dev_priv->drm.pdev->revision);
#define PRINT_FLAG(name) \
DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
#undef SEP_COMMA
}
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
@ -192,7 +186,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
u32 fuse2, eu_disable[s_max];
u32 fuse2, eu_disable[3]; /* s_max */
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

File diff suppressed because it is too large


@ -344,7 +344,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev_priv))
DP |= DP_PIPE_SELECT_CHV(pipe);
else if (pipe == PIPE_B)
DP |= DP_PIPEB_SELECT;
@ -356,10 +356,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* So enable temporarily it if it's not already enabled.
*/
if (!pll_enabled) {
release_cl_override = IS_CHERRYVIEW(dev) &&
release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@ -570,8 +570,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
!IS_BROXTON(dev)))
if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
!IS_BROXTON(dev_priv)))
return;
/*
@ -591,7 +591,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@ -664,7 +664,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
@ -692,7 +692,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@ -706,7 +706,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@ -821,15 +821,16 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
uint32_t precharge, timeout;
if (IS_GEN6(dev))
if (IS_GEN6(dev_priv))
precharge = 3;
else
precharge = 5;
if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@ -1108,8 +1109,46 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
static enum port intel_aux_port(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum port aux_port;
if (!info->alternate_aux_channel) {
DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
port_name(port), port_name(port));
return port;
}
switch (info->alternate_aux_channel) {
case DP_AUX_A:
aux_port = PORT_A;
break;
case DP_AUX_B:
aux_port = PORT_B;
break;
case DP_AUX_C:
aux_port = PORT_C;
break;
case DP_AUX_D:
aux_port = PORT_D;
break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_port = PORT_A;
break;
}
DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
port_name(aux_port), port_name(port));
return aux_port;
}
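
Editor's note, not part of the patch: as an example of the mapping above, a VBT that routes port E's AUX traffic over DP_AUX_A makes intel_aux_port() return PORT_A, so the AUX channel A registers are picked while the encoder itself stays on port E; this generalizes the removed skl_porte_aux_port() special case visible further down in this hunk. A port without an alternate_aux_channel entry simply keeps its own channel.
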
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
enum port port)
{
switch (port) {
case PORT_B:
@ -1123,7 +1162,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
enum port port, int index)
{
switch (port) {
case PORT_B:
@ -1137,7 +1176,7 @@ static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
enum port port)
{
switch (port) {
case PORT_A:
@ -1153,7 +1192,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
enum port port, int index)
{
switch (port) {
case PORT_A:
@ -1168,36 +1207,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
}
/*
* On SKL we don't have Aux for port E so we rely
* on VBT to set a proper alternate aux channel.
*/
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[PORT_E];
switch (info->alternate_aux_channel) {
case DP_AUX_A:
return PORT_A;
case DP_AUX_B:
return PORT_B;
case DP_AUX_C:
return PORT_C;
case DP_AUX_D:
return PORT_D;
default:
MISSING_CASE(info->alternate_aux_channel);
return PORT_A;
}
}
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
enum port port)
{
if (port == PORT_E)
port = skl_porte_aux_port(dev_priv);
switch (port) {
case PORT_A:
case PORT_B:
@ -1211,11 +1223,8 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
enum port port, int index)
{
if (port == PORT_E)
port = skl_porte_aux_port(dev_priv);
switch (port) {
case PORT_A:
case PORT_B:
@ -1229,7 +1238,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
@ -1240,7 +1249,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
@ -1253,7 +1262,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
enum port port = intel_aux_port(dev_priv,
dp_to_dig_port(intel_dp)->port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@ -1297,14 +1307,10 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
/* WaDisableHBR2:skl */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
return false;
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
(INTEL_INFO(dev)->gen >= 9))
if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
return true;
else
return false;
@ -1314,13 +1320,13 @@ static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
int size;
if (IS_BROXTON(dev)) {
if (IS_BROXTON(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
@ -1340,19 +1346,20 @@ intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
if (IS_G4X(dev)) {
if (IS_G4X(dev_priv)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
} else if (HAS_PCH_SPLIT(dev)) {
} else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (IS_CHERRYVIEW(dev)) {
} else if (IS_CHERRYVIEW(dev_priv)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@ -1569,7 +1576,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
max_clock = common_len - 1;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
@ -1586,7 +1593,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return ret;
}
if (HAS_GMCH_DISPLAY(dev))
if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
@ -1711,7 +1718,7 @@ found:
to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
}
if (!HAS_DDI(dev))
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
return true;
@ -1769,7 +1776,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
if (IS_GEN7(dev) && port == PORT_A) {
if (IS_GEN7(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@ -1780,7 +1787,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@ -1792,8 +1799,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
!IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv) &&
pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@ -1805,7 +1813,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev_priv))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
else if (crtc->pipe == PIPE_B)
intel_dp->DP |= DP_PIPEB_SELECT;
@ -2114,7 +2122,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
if (IS_GEN5(dev)) {
if (IS_GEN5(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@ -2122,7 +2130,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
if (!IS_GEN5(dev))
if (!IS_GEN5(dev_priv))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@ -2131,7 +2139,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
if (IS_GEN5(dev)) {
if (IS_GEN5(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@ -2444,9 +2452,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
goto out;
if (IS_GEN7(dev) && port == PORT_A) {
if (IS_GEN7(dev_priv) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
enum pipe p;
for_each_pipe(dev_priv, p) {
@ -2461,7 +2469,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
i915_mmio_reg_offset(intel_dp->output_reg));
} else if (IS_CHERRYVIEW(dev)) {
} else if (IS_CHERRYVIEW(dev_priv)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
@ -2489,7 +2497,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev) && port != PORT_A) {
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@ -2515,8 +2523,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
!IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@ -2636,7 +2644,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & DP_TRAINING_PATTERN_MASK);
if (HAS_DDI(dev)) {
if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@ -2662,8 +2670,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
} else if ((IS_GEN7(dev) && port == PORT_A) ||
(HAS_PCH_CPT(dev) && port != PORT_A)) {
} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@ -2683,7 +2691,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev_priv))
*DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
*DP &= ~DP_LINK_TRAIN_MASK;
@ -2699,7 +2707,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev_priv)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
@ -2749,7 +2757,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(intel_dp);
intel_dp_enable_port(intel_dp, pipe_config);
@ -2760,10 +2768,10 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev_priv))
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
@ -2983,17 +2991,17 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
else if (IS_GEN7(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev) && port != PORT_A)
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@ -3002,10 +3010,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
if (INTEL_INFO(dev)->gen >= 9) {
if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@ -3018,7 +3026,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@ -3030,7 +3038,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@ -3042,7 +3050,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_GEN7(dev) && port == PORT_A) {
} else if (IS_GEN7(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@ -3343,21 +3351,21 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
if (HAS_DDI(dev)) {
if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
} else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
} else if (IS_GEN7(dev) && port == PORT_A) {
} else if (IS_GEN7(dev_priv) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && port == PORT_A) {
} else if (IS_GEN6(dev_priv) && port == PORT_A) {
signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@ -3402,7 +3410,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
uint32_t val;
if (!HAS_DDI(dev))
if (!HAS_DDI(dev_priv))
return;
val = I915_READ(DP_TP_CTL(port));
@ -3437,7 +3445,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev)))
if (WARN_ON(HAS_DDI(dev_priv)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@ -3445,12 +3453,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
if ((IS_GEN7(dev) && port == PORT_A) ||
(HAS_PCH_CPT(dev) && port != PORT_A)) {
if ((IS_GEN7(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev_priv))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
@ -3468,7 +3476,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@ -3551,8 +3559,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
/* Read the eDP Display control capabilities registers */
if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
sizeof(intel_dp->edp_dpcd)))
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
@ -3988,6 +3996,31 @@ go_again:
return -EINVAL;
}
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
if (crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
/* Keep underrun reporting disabled until things are stable */
intel_wait_for_vblank(&dev_priv->drm, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
}
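/*
 * A note on the suppression window above (inferred from the code, not
 * stated by the commit message): retraining momentarily disturbs the
 * pipe, so transient CPU/PCH FIFO underruns are expected while the link
 * is retrained. Masking underrun reporting until the next vblank keeps
 * those expected underruns from being logged as errors.
 */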
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
@ -4008,13 +4041,18 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
/* FIXME: we need to synchronize this sort of stuff with hardware
* readout */
if (WARN_ON_ONCE(!intel_dp->lane_count))
return;
/* if link training is requested we should always perform it */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
intel_dp_retrain_link(intel_dp);
}
}
@ -4756,11 +4794,16 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
if (IS_GEN9(dev_priv) && lspcon->active)
lspcon_resume(lspcon);
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
@ -5074,7 +5117,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
if (IS_BROXTON(dev)) {
if (IS_BROXTON(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@ -5087,9 +5130,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
@ -5100,7 +5143,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@ -5108,7 +5151,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
IS_BROXTON(dev) ?
IS_BROXTON(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@ -5116,7 +5159,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
static void intel_dp_pps_init(struct drm_device *dev,
struct intel_dp *intel_dp)
{
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
@ -5586,7 +5631,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
@ -5595,7 +5640,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev_priv))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
@ -5651,9 +5696,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* intel_dp vfuncs */
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
else if (HAS_PCH_SPLIT(dev))
else if (HAS_PCH_SPLIT(dev_priv))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
@ -5663,7 +5708,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
/* Preserve the current hw state. */
@ -5684,7 +5729,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
@ -5705,7 +5750,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@ -5717,7 +5762,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
@ -5751,7 +5796,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
if (IS_G4X(dev) && !IS_GM45(dev)) {
if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@ -5794,13 +5839,13 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@ -5817,7 +5862,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@ -5826,6 +5871,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;


@ -225,9 +225,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
*
* Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
* supported in source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);


@ -523,6 +523,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;


@ -1851,13 +1851,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
else if (HAS_DDI(dev))
else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
@ -1883,7 +1883,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
/* FIXME: Move this to a more suitable place */
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
intel_ddi_pll_init(dev);
}


@ -206,6 +206,7 @@ struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
@ -247,6 +248,8 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
struct intel_panel {
@ -465,9 +468,13 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
struct skl_pipe_wm {
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
};
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
uint32_t linetime;
};
@ -493,6 +500,7 @@ struct intel_crtc_wm_state {
struct {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
/* cached plane data rate */
unsigned plane_data_rate[I915_MAX_PLANES];
@ -730,6 +738,9 @@ struct intel_crtc {
bool cxsr_allowed;
} wm;
/* gen9+: ddb allocation currently being used */
struct skl_ddb_entry hw_ddb;
int scanline_offset;
struct {
@ -796,22 +807,22 @@ struct intel_plane {
};
struct intel_watermark_params {
unsigned long fifo_size;
unsigned long max_wm;
unsigned long default_wm;
unsigned long guard_size;
unsigned long cacheline_size;
u16 fifo_size;
u16 max_wm;
u8 default_wm;
u8 guard_size;
u8 cacheline_size;
};
struct cxsr_latency {
int is_desktop;
int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
unsigned long display_hpll_disable;
unsigned long cursor_sr;
unsigned long cursor_hpll_disable;
bool is_desktop : 1;
bool is_ddr3 : 1;
u16 fsb_freq;
u16 mem_freq;
u16 display_sr;
u16 display_hpll_disable;
u16 cursor_sr;
u16 cursor_hpll_disable;
};
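/*
 * Rough size math for the two shrunken structs above (intel_watermark_params
 * and cxsr_latency), assuming a typical x86-64 layout; the figures are
 * estimates, not taken from the commit. The old cxsr_latency entry needed
 * two ints plus six unsigned longs, about 56 bytes, while the packed form
 * needs two single-bit flags plus six u16s, about 14 bytes. A hypothetical
 * compile-time guard for that expectation could be written as:
 *
 *	BUILD_BUG_ON(sizeof(struct cxsr_latency) > 16);
 */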
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@ -950,17 +961,22 @@ struct intel_dp {
bool compliance_test_active;
};
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
struct drm_dp_aux *aux;
};
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
struct intel_dp_mst_encoder {
@ -1182,6 +1198,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@ -1478,6 +1495,10 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
@ -1504,6 +1525,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
@ -1707,7 +1729,7 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_init_pm(struct drm_device *dev);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
@ -1733,20 +1755,24 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
struct skl_pipe_wm *out);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
const struct skl_ddb_allocation *new,
enum pipe pipe);
bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
const struct skl_ddb_allocation *old,
const struct skl_ddb_allocation *new,
enum pipe pipe);
struct intel_crtc *intel_crtc);
void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
const struct skl_wm_values *wm);
const struct skl_plane_wm *wm,
const struct skl_ddb_allocation *ddb);
void skl_write_plane_wm(struct intel_crtc *intel_crtc,
const struct skl_wm_values *wm,
const struct skl_plane_wm *wm,
const struct skl_ddb_allocation *ddb,
int plane);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
@ -1826,4 +1852,7 @@ int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc_state *crtc_state);
/* intel_lspcon.c */
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
#endif /* __INTEL_DRV_H__ */


@ -437,11 +437,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
}
@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@ -494,7 +494,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@ -656,7 +656,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@ -664,7 +663,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@ -741,7 +740,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
@ -762,7 +760,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@ -771,7 +769,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
@ -970,11 +969,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@ -1066,7 +1065,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
if (IS_BROXTON(dev)) {
if (IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@ -1138,7 +1137,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@ -1153,7 +1152,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
} else if (IS_BROXTON(dev)) {
} else if (IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@ -1242,7 +1241,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@ -1346,7 +1345,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(dev) &&
if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
@ -1450,9 +1449,9 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else if (IS_BROXTON(dev)) {
} else if (IS_BROXTON(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@ -1488,6 +1487,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_encoder->port = port;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.


@ -126,6 +126,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
DRM_DEBUG_KMS("\n");
flags = *data++;
type = *data++;
@ -199,6 +201,8 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
u32 delay = *((const u32 *) data);
DRM_DEBUG_KMS("\n");
usleep_range(delay, delay + 10);
data += 4;
@ -307,6 +311,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index;
bool value;
DRM_DEBUG_KMS("\n");
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
@ -331,18 +337,36 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
DRM_DEBUG_KMS("Skipping I2C element execution\n");
return data + *(data + 6) + 7;
}
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
DRM_DEBUG_KMS("Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
DRM_DEBUG_KMS("Skipping PMIC element execution\n");
return data + 15;
}
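/*
 * The three stubs above only need to advance the parser past elements they
 * do not execute. Reading the return expressions: an I2C element stores its
 * payload length at byte 6 after a 7-byte fixed header, an SPI element
 * stores it at byte 5 after a 6-byte header, and a PMIC element is a fixed
 * 15 bytes (offsets taken from the code above, not from a VBT specification).
 */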
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
[MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
[MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
};
/*
@ -385,11 +409,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data) {
DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
seq_id, sequence_name(seq_id));
if (!data)
return;
}
WARN_ON(*data != seq_id);
@ -420,7 +441,15 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
operation_size = *data++;
if (mipi_elem_exec) {
const u8 *next = data + operation_size;
data = mipi_elem_exec(intel_dsi, data);
/* Consistency check if we have size. */
if (operation_size && data != next) {
DRM_ERROR("Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
@ -438,6 +467,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
static int vbt_panel_prepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
@ -445,7 +476,8 @@ static int vbt_panel_prepare(struct drm_panel *panel)
static int vbt_panel_unprepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
return 0;
}
@ -453,12 +485,14 @@ static int vbt_panel_unprepare(struct drm_panel *panel)
static int vbt_panel_enable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;


@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
if (IS_BROXTON(encoder->base.dev))
if (IS_BROXTON(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@ -515,11 +515,11 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@ -528,21 +528,21 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
else if (IS_BROXTON(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@ -564,10 +564,10 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}


@ -412,16 +412,14 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
static char intel_dvo_port_name(i915_reg_t dvo_reg)
static enum port intel_dvo_port(i915_reg_t dvo_reg)
{
if (i915_mmio_reg_equal(dvo_reg, DVOA))
return 'A';
return PORT_A;
else if (i915_mmio_reg_equal(dvo_reg, DVOB))
return 'B';
else if (i915_mmio_reg_equal(dvo_reg, DVOC))
return 'C';
return PORT_B;
else
return '?';
return PORT_C;
}
void intel_dvo_init(struct drm_device *dev)
@ -464,6 +462,7 @@ void intel_dvo_init(struct drm_device *dev)
bool dvoinit;
enum pipe pipe;
uint32_t dpll[I915_MAX_PIPES];
enum port port;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@ -511,12 +510,15 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
port = intel_dvo_port(dvo->dvo_reg);
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
"DVO %c", intel_dvo_port_name(dvo->dvo_reg));
"DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |


@ -82,12 +82,17 @@ static const struct engine_info {
},
};
static struct intel_engine_cs *
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct intel_engine_cs *engine = &dev_priv->engine[id];
struct intel_engine_cs *engine;
GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
engine->id = id;
engine->i915 = dev_priv;
@ -97,7 +102,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
return engine;
dev_priv->engine[id] = engine;
return 0;
}
/**
@ -110,13 +116,16 @@ int intel_engines_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int i;
int ret;
WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@ -131,7 +140,11 @@ int intel_engines_init(struct drm_device *dev)
if (!init)
continue;
ret = init(intel_engine_setup(dev_priv, i));
ret = intel_engine_setup(dev_priv, i);
if (ret)
goto cleanup;
ret = init(dev_priv->engine[i]);
if (ret)
goto cleanup;
@ -143,7 +156,7 @@ int intel_engines_init(struct drm_device *dev)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
@ -151,11 +164,11 @@ int intel_engines_init(struct drm_device *dev)
return 0;
cleanup:
for (i = 0; i < I915_NUM_ENGINES; i++) {
for_each_engine(engine, dev_priv, id) {
if (i915.enable_execlists)
intel_logical_ring_cleanup(&dev_priv->engine[i]);
intel_logical_ring_cleanup(engine);
else
intel_engine_cleanup(&dev_priv->engine[i]);
intel_engine_cleanup(engine);
}
return ret;
@ -319,3 +332,137 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
}
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
if (INTEL_GEN(dev_priv) >= 8)
bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
RING_BBADDR_UDW(engine->mmio_base));
else
bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
return bbaddr;
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT";
default: return "";
}
}
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
uint32_t mcr;
uint32_t ret;
enum forcewake_domains fw_domains;
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
/*
* The HW expects the slice and subslice selectors to be reset to 0
* after reading out the registers.
*/
WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
ret = I915_READ_FW(reg);
mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
spin_unlock_irq(&dev_priv->uncore.lock);
return ret;
}
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
memset(instdone, 0, sizeof(*instdone));
switch (INTEL_GEN(dev_priv)) {
default:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
if (engine->id != RCS)
break;
instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
GEN7_SAMPLER_INSTDONE);
instdone->row[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
GEN7_ROW_INSTDONE);
}
break;
case 7:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
if (engine->id != RCS)
break;
instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
break;
case 6:
case 5:
case 4:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
if (engine->id == RCS)
/* HACK: Using the wrong struct member */
instdone->slice_common = I915_READ(GEN4_INSTDONE1);
break;
case 3:
case 2:
instdone->instdone = I915_READ(GEN2_INSTDONE);
break;
}
}


@ -774,6 +774,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
/* We don't need to use a state cache here since this information is
* global for all CRTCs.
*/
if (fbc->underrun_detected) {
fbc->no_fbc_reason = "underrun detected";
return false;
}
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@ -859,6 +867,11 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
if (fbc->underrun_detected) {
fbc->no_fbc_reason = "underrun detected";
return false;
}
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
fbc->no_fbc_reason = "no enabled pipes can have FBC";
return false;
@ -1221,6 +1234,59 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
cancel_work_sync(&fbc->work.work);
}
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, fbc.underrun_work);
struct intel_fbc *fbc = &dev_priv->fbc;
mutex_lock(&fbc->lock);
/* Maybe we were scheduled twice. */
if (fbc->underrun_detected)
goto out;
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
intel_fbc_deactivate(dev_priv);
out:
mutex_unlock(&fbc->lock);
}
/**
* intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
* @dev_priv: i915 device instance
*
* Without FBC, most underruns are harmless and don't really cause too many
* problems, except for an annoying message on dmesg. With FBC, underruns can
* become black screens or even worse, especially when paired with bad
* watermarks. So in order for us to be on the safe side, completely disable FBC
* in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
* already suggests that watermarks may be bad, so try to be as safe as
* possible.
*
* This function is called from the IRQ handler.
*/
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
return;
/* There's no guarantee that underrun_detected won't be set to true
* right after this check and before the work is scheduled, but that's
* not a problem since we'll check it again under the work function
* while FBC is locked. This check here is just to prevent us from
* unnecessarily scheduling the work, and it relies on the fact that we
* never switch underrun_detected back to false after it's true. */
if (READ_ONCE(fbc->underrun_detected))
return;
schedule_work(&fbc->underrun_work);
}
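/*
 * The unlocked READ_ONCE() test above, settled by a re-check under
 * fbc->lock in the worker, is the usual "optimistic fast path, decide
 * under the lock" pattern. A minimal standalone sketch of the same idea,
 * with hypothetical names (not i915 code):
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct trip_state {
	struct mutex lock;
	bool tripped;
	struct work_struct work;
};

/* Fast-path caller (e.g. from an IRQ handler): never takes the mutex;
 * a stale read only costs a redundant schedule_work(), which is harmless. */
static void trip_report(struct trip_state *s)
{
	if (READ_ONCE(s->tripped))
		return;
	schedule_work(&s->work);
}

static void trip_work_fn(struct work_struct *work)
{
	struct trip_state *s = container_of(work, struct trip_state, work);

	mutex_lock(&s->lock);
	if (!s->tripped) {	/* re-check now that we hold the lock */
		s->tripped = true;
		/* one-time reaction goes here */
	}
	mutex_unlock(&s->lock);
}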
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
@ -1292,6 +1358,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;


@ -254,13 +254,13 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
if (HAS_GMCH_DISPLAY(dev))
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN5(dev) || IS_GEN6(dev))
else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
else if (IS_GEN7(dev_priv))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN8(dev) || IS_GEN9(dev))
else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@ -372,6 +372,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
/**


@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
/* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
u32 tmp;
/* tell all command streamers to forward interrupts (but not vblank) to GuC */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@ -347,7 +349,6 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_device *dev = &dev_priv->drm;
struct i915_vma *vma;
int ret;
@ -375,24 +376,22 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
/* WaDisableMinuteIaClockGating:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
/* WaDisableMinuteIaClockGating:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
/* WaC6DisallowByGfxPause*/
if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_B0))
/* WaC6DisallowByGfxPause:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
if (IS_BROXTON(dev))
if (IS_BROXTON(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
if (IS_GEN9(dev)) {
if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
@ -720,23 +719,28 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
/* A negative value means "use platform default" */
if (i915.enable_guc_loading < 0)
i915.enable_guc_loading = HAS_GUC_UCODE(dev);
if (i915.enable_guc_submission < 0)
i915.enable_guc_submission = HAS_GUC_SCHED(dev);
if (!HAS_GUC(dev)) {
i915.enable_guc_loading = 0;
i915.enable_guc_submission = 0;
} else {
/* A negative value means "use platform default" */
if (i915.enable_guc_loading < 0)
i915.enable_guc_loading = HAS_GUC_UCODE(dev);
if (i915.enable_guc_submission < 0)
i915.enable_guc_submission = HAS_GUC_SCHED(dev);
}
if (!HAS_GUC_UCODE(dev)) {
fw_path = NULL;
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev)) {
} else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev)) {
} else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;


@ -31,14 +31,20 @@
* GPU among multiple virtual machines on a time-sharing basis. Each
* virtual machine is presented a virtual GPU (vGPU), which has equivalent
* features as the underlying physical GPU (pGPU), so i915 driver can run
* seamlessly in a virtual machine. This file provides the enlightenments
* of GVT and the necessary components used by GVT in i915 driver.
* seamlessly in a virtual machine.
*
* To virtualize GPU resources, the GVT-g driver depends on hypervisor
* technology (e.g. KVM/VFIO/mdev, Xen) to provide resource access trapping,
* with the trapped accesses then virtualized within the GVT-g device module.
* More architectural design documentation is available at
* https://01.org/group/2230/documentation-list.
*/
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
return true;
if (IS_SKYLAKE(dev_priv))
return true;
return false;
}


@ -24,7 +24,7 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
#include "gvt/gvt.h"
struct intel_gvt;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);


@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@ -864,7 +864,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@ -879,9 +879,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (HAS_PCH_CPT(dev))
if (HAS_PCH_CPT(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (IS_CHERRYVIEW(dev))
else if (IS_CHERRYVIEW(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@ -911,9 +911,9 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE))
goto out;
if (HAS_PCH_CPT(dev))
if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else if (IS_CHERRYVIEW(dev))
else if (IS_CHERRYVIEW(dev_priv))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@ -956,7 +956,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
if (!HAS_PCH_SPLIT(dev) &&
if (!HAS_PCH_SPLIT(dev_priv) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@ -1141,7 +1141,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@ -1241,7 +1241,7 @@ static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
@ -1249,11 +1249,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
return MODE_OK;
@ -1265,6 +1265,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
{
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@ -1287,7 +1288,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock, true);
/* if we can't do 8bpc we may still be able to do 12bpc */
if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
return status;
@ -1297,7 +1298,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
if (HAS_GMCH_DISPLAY(dev))
if (HAS_GMCH_DISPLAY(to_i915(dev)))
return false;
/*
@ -1312,7 +1313,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
@ -1339,7 +1340,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
clock_12bpc *= 2;
}
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
@ -1799,6 +1800,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
u8 ddc_pin;
if (info->alternate_ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
info->alternate_ddc_pin, port_name(port));
return info->alternate_ddc_pin;
}
switch (port) {
case PORT_B:
if (IS_BROXTON(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
if (IS_BROXTON(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev_priv))
ddc_pin = GMBUS_PIN_DPD_CHV;
else
ddc_pin = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_DPB;
break;
}
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
ddc_pin, port_name(port));
return ddc_pin;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@ -1808,7 +1853,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@ -1826,12 +1870,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
switch (port) {
case PORT_B:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
@ -1842,61 +1884,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
if (WARN_ON(IS_BROXTON(dev_priv)))
intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
else if (IS_CHERRYVIEW(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
/* On SKL PORT E doesn't have seperate GMBUS pin
* We rely on VBT to set a proper alternate GMBUS pin. */
alternate_ddc_pin =
dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
switch (alternate_ddc_pin) {
case DDC_PIN_B:
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
break;
case DDC_PIN_C:
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
break;
case DDC_PIN_D:
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(alternate_ddc_pin);
}
intel_encoder->hpd_pin = HPD_PORT_E;
break;
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
default:
BUG();
MISSING_CASE(port);
return;
}
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev)) {
} else if (IS_G4X(dev_priv)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev)) {
} else if (HAS_DDI(dev_priv)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev)) {
} else if (HAS_PCH_IBX(dev_priv)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
@ -1906,7 +1919,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@ -1920,7 +1933,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
if (IS_G4X(dev) && !IS_GM45(dev)) {
if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@ -1929,6 +1942,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@ -1949,7 +1963,7 @@ void intel_hdmi_init(struct drm_device *dev,
DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@ -1957,29 +1971,30 @@ void intel_hdmi_init(struct drm_device *dev,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
if (HAS_PCH_CPT(dev))
if (HAS_PCH_CPT(dev_priv))
intel_encoder->enable = cpt_enable_hdmi;
else if (HAS_PCH_IBX(dev))
else if (HAS_PCH_IBX(dev_priv))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
if (IS_CHERRYVIEW(dev)) {
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@ -1993,7 +2008,7 @@ void intel_hdmi_init(struct drm_device *dev,
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
if (IS_G4X(dev))
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;


@ -138,11 +138,10 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@ -468,13 +467,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
const unsigned int fw =
intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
FW_REG_READ | FW_REG_WRITE);
int i = 0, inc, try = 0;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, fw);
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@ -576,7 +571,6 @@ timeout:
ret = -EAGAIN;
out:
intel_uncore_forcewake_put(dev_priv, fw);
return ret;
}
@ -633,10 +627,10 @@ int intel_setup_gmbus(struct drm_device *dev)
unsigned int pin;
int ret;
if (HAS_PCH_NOP(dev))
if (HAS_PCH_NOP(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
@ -674,7 +668,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
if (IS_I830(dev_priv))
bus->force_bit = 1;
intel_gpio_setup(bus, pin);


@ -275,8 +275,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
engine->disable_lite_restore_wa =
(IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
@ -853,13 +852,12 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* WaDisableLSQCROPERFforOCL:skl,kbl
* WaDisableLSQCROPERFforOCL:kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@ -1002,9 +1000,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
/* WaDisableCtxRestoreArbitration:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@ -1075,9 +1072,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@ -1104,9 +1100,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_NOOP);
}
/* WaDisableCtxRestoreArbitration:skl,bxt */
if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
/* WaDisableCtxRestoreArbitration:bxt */
if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@ -1250,8 +1245,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
if (!execlists_elsp_idle(engine))
/* After a GPU reset, we may have requests to replay */
if (!execlists_elsp_idle(engine)) {
engine->execlist_port[0].count = 0;
engine->execlist_port[1].count = 0;
execlists_submit_ports(engine);
}
return 0;
}
@ -1326,10 +1325,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
memset(&port[1], 0, sizeof(port[1]));
}
/* CS is stopped, and we will resubmit both ports on resume */
GEM_BUG_ON(request->ctx != port[0].request->ctx);
port[0].count = 0;
port[1].count = 0;
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
@ -1652,9 +1648,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
if (!intel_engine_initialized(engine))
return;
/*
* Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation.
@ -1681,13 +1674,16 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv, id)
engine->submit_request = execlists_submit_request;
}
@ -1945,7 +1941,7 @@ static void execlists_init_reg_state(u32 *reg_state,
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
RING_CTL_SIZE(ring->size) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@ -2153,30 +2149,43 @@ error_deref_obj:
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
for_each_engine(engine, dev_priv) {
struct intel_context *ce = &ctx->engine[engine->id];
void *vaddr;
uint32_t *reg_state;
/* Because we emit WA_TAIL_DWORDS there may be a disparity
* between our bookkeeping in ce->ring->head and ce->ring->tail and
* that stored in context. As we only write new commands from
* ce->ring->tail onwards, everything before that is junk. If the GPU
* starts reading from its RING_HEAD from the context, it may try to
* execute that junk and die.
*
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
u32 *reg;
if (!ce->state)
continue;
if (!ce->state)
continue;
vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
if (WARN_ON(IS_ERR(vaddr)))
continue;
reg = i915_gem_object_pin_map(ce->state->obj,
I915_MAP_WB);
if (WARN_ON(IS_ERR(reg)))
continue;
reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
reg[CTX_RING_HEAD+1] = 0;
reg[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
ce->state->obj->dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
ce->state->obj->dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = 0;
ce->ring->tail = 0;
ce->ring->head = ce->ring->tail = 0;
ce->ring->last_retired_head = -1;
intel_ring_update_space(ce->ring);
}
}
}


@ -0,0 +1,136 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*
*/
#include <drm/drm_edid.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include "intel_drv.h"
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
struct i2c_adapter *adapter = &lspcon->aux->ddc;
if (drm_lspcon_get_mode(adapter, &current_mode))
DRM_ERROR("Error reading LSPCON mode\n");
else
DRM_DEBUG_KMS("Current LSPCON mode %s\n",
current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
return current_mode;
}
static int lspcon_change_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode, bool force)
{
int err;
enum drm_lspcon_mode current_mode;
struct i2c_adapter *adapter = &lspcon->aux->ddc;
err = drm_lspcon_get_mode(adapter, &current_mode);
if (err) {
DRM_ERROR("Error reading LSPCON mode\n");
return err;
}
if (current_mode == mode) {
DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
return 0;
}
err = drm_lspcon_set_mode(adapter, mode);
if (err < 0) {
DRM_ERROR("LSPCON mode change failed\n");
return err;
}
lspcon->mode = mode;
DRM_DEBUG_KMS("LSPCON mode changed done\n");
return 0;
}
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon->aux->ddc;
/* Lets probe the adaptor and check its type */
adaptor_type = drm_dp_dual_mode_detect(adapter);
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
drm_dp_get_dual_mode_type_name(adaptor_type));
return false;
}
/* Yay ... got a LSPCON device */
DRM_DEBUG_KMS("LSPCON detected\n");
lspcon->mode = lspcon_get_current_mode(lspcon);
lspcon->active = true;
return true;
}
void lspcon_resume(struct intel_lspcon *lspcon)
{
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
DRM_ERROR("LSPCON resume failed\n");
else
DRM_DEBUG_KMS("LSPCON resume success\n");
}
bool lspcon_init(struct intel_digital_port *intel_dig_port)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("LSPCON is supported on GEN9 only\n");
return false;
}
lspcon->active = false;
lspcon->mode = DRM_LSPCON_MODE_INVALID;
lspcon->aux = &dp->aux;
if (!lspcon_probe(lspcon)) {
DRM_ERROR("Failed to probe lspcon\n");
return false;
}
/*
* In the SW state machine, lets Put LSPCON in PCON mode only.
* In this way, it will work with both HDMI 1.4 sinks as well as HDMI
* 2.0 sinks.
*/
if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
true) < 0) {
DRM_ERROR("LSPCON mode change to PCON failed\n");
return false;
}
}
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
}


@ -106,7 +106,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & LVDS_PORT_EN))
goto out;
if (HAS_PCH_CPT(dev))
if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@ -396,7 +396,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@ -406,7 +406,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
unsigned int lvds_bpp;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@ -431,7 +431,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
@ -566,7 +566,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
if (!HAS_PCH_SPLIT(dev))
if (!HAS_PCH_SPLIT(dev_priv))
intel_display_resume(dev);
dev_priv->modeset_restore = MODESET_DONE;
@ -949,16 +949,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
static bool intel_lvds_supported(struct drm_device *dev)
static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
if (INTEL_GEN(dev_priv) <= 4 &&
IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
return false;
@ -990,21 +991,21 @@ void intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
if (!intel_lvds_supported(dev))
if (!intel_lvds_supported(dev_priv))
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
lvds = I915_READ(lvds_reg);
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp.support) {
@ -1064,12 +1065,13 @@ void intel_lvds_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
else if (IS_GEN4(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@ -1157,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Ironlake: FIXME if still fail, not try pipe mode now */
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev_priv))
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;


@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct intel_engine_cs *engine = dev_priv->engine[RCS];
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
@ -1470,6 +1470,8 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
kfree(dev_priv->overlay);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@ -1587,3 +1589,5 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
#endif

File diff suppressed because it is too large


@ -268,7 +268,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
if (IS_HASWELL(dev))
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
@ -344,7 +344,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
if (HAS_DDI(dev) && dig_port->port != PORT_A) {
if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
@ -354,20 +354,20 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}
if (IS_HASWELL(dev) &&
if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
if (IS_HASWELL(dev) &&
if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
@ -402,7 +402,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
/* On HSW+ after we enable PSR on source it will activate it
* as soon as it match configure idle_frame count. So
* we just actually enable it here on activation time.
@ -448,7 +448,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
if (HAS_DDI(dev)) {
if (HAS_DDI(dev_priv)) {
hsw_psr_setup_vsc(intel_dp);
if (dev_priv->psr.psr2_support) {
@ -580,7 +580,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
}
/* Disable PSR on Source */
if (HAS_DDI(dev))
if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
@ -827,17 +827,17 @@ void intel_psr_init(struct drm_device *dev)
/* Per platform default */
if (i915.enable_psr == -1) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
}
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else


@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
return acthd;
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@ -585,9 +569,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
@ -851,15 +833,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@ -869,10 +849,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
@ -884,9 +862,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
/* WaDisableMaskBasedCammingInRCC:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@ -1003,47 +980,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
/* WaDisablePowerCompilerClockGating:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
@ -1284,7 +1226,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *req)
if (ret)
return ret;
for_each_engine_id(waiter, dev_priv, id) {
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@ -1321,7 +1263,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *req)
if (ret)
return ret;
for_each_engine_id(waiter, dev_priv, id) {
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@ -1348,6 +1290,7 @@ static int gen6_signal(struct drm_i915_gem_request *req)
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret, num_rings;
num_rings = INTEL_INFO(dev_priv)->num_rings;
@ -1355,7 +1298,7 @@ static int gen6_signal(struct drm_i915_gem_request *req)
if (ret)
return ret;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
@ -1989,6 +1932,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@ -2146,9 +2090,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
if (!intel_engine_initialized(engine))
return;
dev_priv = engine->i915;
if (engine->buffer) {
@ -2175,13 +2116,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv) {
for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}


@ -73,13 +73,40 @@ enum intel_engine_hangcheck_action {
#define HANGCHECK_SCORE_RING_HUNG 31
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3
#define instdone_slice_mask(dev_priv__) \
(INTEL_GEN(dev_priv__) == 7 ? \
1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
(INTEL_GEN(dev_priv__) == 7 ? \
1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
(slice__) < I915_MAX_SLICES; \
(subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
(slice__) += ((subslice__) == 0)) \
for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
(BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
struct intel_instdone {
u32 instdone;
/* The following exist only in the RCS engine */
u32 slice_common;
u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
int score;
enum intel_engine_hangcheck_action action;
int deadlock;
u32 instdone[I915_NUM_INSTDONE_REG];
struct intel_instdone instdone;
};
struct intel_ring {
@ -368,12 +395,6 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
intel_engine_initialized(const struct intel_engine_cs *engine)
{
return engine->i915 != NULL;
}
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
@ -394,7 +415,7 @@ intel_engine_sync_index(struct intel_engine_cs *engine,
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - engine) - 1;
idx = (other->id - engine->id) - 1;
if (idx < 0)
idx += I915_NUM_ENGINES;
@ -514,6 +535,8 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@ -521,6 +544,9 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
int init_workarounds_ring(struct intel_engine_cs *engine);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case


@ -288,7 +288,6 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@ -304,7 +303,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
if (IS_BROADWELL(dev))
if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@ -2590,20 +2589,19 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_BROXTON(dev)) {
} else if (IS_BROXTON(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev)) {
} else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
@ -2758,7 +2756,6 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_device *dev = &dev_priv->drm;
struct device *kdev = &pdev->dev;
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
@ -2770,7 +2767,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
if (!HAS_RUNTIME_PM(dev)) {
if (!HAS_RUNTIME_PM(dev_priv)) {
pm_runtime_dont_use_autosuspend(kdev);
pm_runtime_get_sync(kdev);
} else {


@ -251,7 +251,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
if (HAS_PCH_IBX(dev)) {
if (HAS_PCH_IBX(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
}
@ -307,7 +307,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
} sdvo_cmd_names[] = {
} __attribute__ ((packed)) sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
@ -1133,7 +1133,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
if (HAS_PCH_SPLIT(encoder->base.dev))
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
/* We need to construct preferred input timings based on our
@ -1273,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@ -1286,7 +1286,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@ -1296,7 +1296,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_INFO(dev)->gen >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@ -1339,7 +1340,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
if (HAS_PCH_CPT(dev))
if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@ -1389,7 +1390,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* encoder->get_config we so already have a valid pixel multplier on all
* other platfroms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@ -1595,15 +1596,15 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
uint16_t hotplug;
if (!I915_HAS_HOTPLUG(dev))
if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@ -2981,6 +2982,7 @@ bool intel_sdvo_init(struct drm_device *dev,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
intel_encoder->port = port;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
@ -2996,7 +2998,7 @@ bool intel_sdvo_init(struct drm_device *dev,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {

Some files were not shown because too many files have changed in this diff Show More