drm vmwgfx, mediatek, nouveau, amdgpu, rockchip, intel fixes
Merge tag 'drm-fixes-2018-12-14' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "While I hoped things would calm down, the world hasn't joined with me,
  but it's a few things scattered over a wide area. The i915 workarounds
  regression fix is probably the largest; the rest are more usual sized.
  We also get some new AMD PCI IDs.

  There is also a patch in here to MAINTAINERS to add Daniel as an
  official DRM toplevel co-maintainer. He's decided he wants to step up
  and share the glory, and he'll likely process next week's fixes while
  I'm away on holidays.

  Summary:

  amdgpu:
   - some new PCI IDs
   - fixed firmware image updates
   - power management fixes
   - locking warning fix

  nouveau:
   - framebuffer flushing fix
   - memory leak fix
   - tegra device init regression fix

  vmwgfx:
   - OOM kernel memory fix
   - excess return in function fix

  i915:
   - the biggest fix is a regression fix where workarounds weren't
     getting reapplied after a GPU hang, causing further crashing; this
     fixes the workaround application to make it happen again
   - GPU hang fixes for Braswell and some GEN3 GPUs
   - GVT fix for Broadwell tiling

  rockchip:
   - revert to fix a regression causing a WARN on shutdown

  mediatek:
   - avoid crash attaching to non-existent bridges"

* tag 'drm-fixes-2018-12-14' of git://anongit.freedesktop.org/drm/drm: (23 commits)
  drm/vmwgfx: Protect from excessive execbuf kernel memory allocations v3
  MAINTAINERS: Daniel for drm co-maintainer
  drm/amdgpu: drop fclk/gfxclk ratio setting
  drm/vmwgfx: remove redundant return ret statement
  drm/i915: Flush GPU relocs harder for gen3
  drm/i915: Allocate a common scratch page
  drm/i915/execlists: Apply a full mb before execution for Braswell
  drm/nouveau/kms: Fix memory leak in nv50_mstm_del()
  drm/nouveau/kms/nv50-: also flush fb writes when rewinding push buffer
  drm/amdgpu: Fix DEBUG_LOCKS_WARN_ON(depth <= 0) in amdgpu_ctx.lock
  Revert "drm/rockchip: Allow driver to be shutdown on reboot/kexec"
  drm/nouveau/drm/nouveau: tegra: Call nouveau_drm_device_init()
  drm/amdgpu/powerplay: Apply avfs cks-off voltages on VI
  drm/amdgpu: update SMC firmware image for polaris10 variants
  drm/amdkfd: add new vega20 pci id
  drm/amdkfd: add new vega10 pci ids
  drm/amdgpu: add some additional vega20 pci ids
  drm/amdgpu: add some additional vega10 pci ids
  drm/amdgpu: update smu firmware images for VI variants (v2)
  drm/i915: Introduce per-engine workarounds
  ...
commit 92de1de51e
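The headline i915 change below converts one-shot workaround register writes into a recorded list that is replayed after every GPU reset. The following stand-alone C sketch (invented types and register numbers, not the kernel code) illustrates that record-then-replay pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wa { unsigned int reg, mask, val; };

struct wa_list {
	struct wa *list;
	unsigned int count;
};

/* record a workaround; grow the array in chunks of 16, like the kernel */
static void wal_add(struct wa_list *wal, struct wa wa)
{
	if ((wal->count & 15) == 0) {
		struct wa *grown = realloc(wal->list,
					   (wal->count + 16) * sizeof(*grown));
		if (!grown)
			return;	/* mirror the kernel's "no space" bail-out */
		wal->list = grown;
	}
	wal->list[wal->count++] = wa;
}

/* replay every recorded read-modify-write; safe to call after each reset */
static void wal_apply(const struct wa_list *wal, unsigned int *regs)
{
	for (unsigned int i = 0; i < wal->count; i++) {
		const struct wa *w = &wal->list[i];
		regs[w->reg] = (regs[w->reg] & ~w->mask) | w->val;
	}
}

int main(void)
{
	unsigned int regs[4] = {0};
	struct wa_list wal = {0};

	wal_add(&wal, (struct wa){ .reg = 1, .mask = 0xf, .val = 0x5 });
	wal_apply(&wal, regs);		/* applied once at init */
	memset(regs, 0, sizeof(regs));	/* pretend a GPU hang wiped state */
	wal_apply(&wal, regs);		/* the fix: replay after the reset */
	printf("reg1 = %#x\n", regs[1]);
	free(wal.list);
	return 0;
}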
@@ -4847,6 +4847,7 @@ F:	include/uapi/drm/vmwgfx_drm.h
 
 DRM DRIVERS
 M:	David Airlie <airlied@linux.ie>
+M:	Daniel Vetter <daniel@ffwll.ch>
 L:	dri-devel@lists.freedesktop.org
 T:	git git://anongit.freedesktop.org/drm/drm
 B:	https://bugs.freedesktop.org/

@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	case CHIP_TOPAZ:
 		if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-		    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+		    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
 			info->is_kicker = true;
 			strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
 		} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		if (type == CGS_UCODE_ID_SMU) {
 			if (((adev->pdev->device == 0x67ef) &&
 			     ((adev->pdev->revision == 0xe0) ||
-			      (adev->pdev->revision == 0xe2) ||
 			      (adev->pdev->revision == 0xe5))) ||
 			    ((adev->pdev->device == 0x67ff) &&
 			     ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			      (adev->pdev->revision == 0xff)))) {
 				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-			} else
+			} else if ((adev->pdev->device == 0x67ef) &&
+				   (adev->pdev->revision == 0xe2)) {
+				info->is_kicker = true;
+				strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+			} else {
 				strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+			}
 		} else if (type == CGS_UCODE_ID_SMU_SK) {
 			strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
 		}
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			      (adev->pdev->revision == 0xe7) ||
 			      (adev->pdev->revision == 0xef))) ||
 			    ((adev->pdev->device == 0x6fdf) &&
-			     (adev->pdev->revision == 0xef))) {
+			     ((adev->pdev->revision == 0xef) ||
+			      (adev->pdev->revision == 0xff)))) {
 				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-			} else
+			} else if ((adev->pdev->device == 0x67df) &&
+				   ((adev->pdev->revision == 0xe1) ||
+				    (adev->pdev->revision == 0xf7))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+			} else {
 				strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+			}
 		} else if (type == CGS_UCODE_ID_SMU_SK) {
 			strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 		}
 		break;
 	case CHIP_POLARIS12:
-		strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		if (((adev->pdev->device == 0x6987) &&
+		     ((adev->pdev->revision == 0xc0) ||
+		      (adev->pdev->revision == 0xc3))) ||
+		    ((adev->pdev->device == 0x6981) &&
+		     ((adev->pdev->revision == 0x00) ||
+		      (adev->pdev->revision == 0x01) ||
+		      (adev->pdev->revision == 0x10)))) {
+			info->is_kicker = true;
+			strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+		} else {
+			strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		}
 		break;
 	case CHIP_VEGAM:
 		strcpy(fw_name, "amdgpu/vegam_smc.bin");

@@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		goto free_chunk;
 	}
 
+	mutex_lock(&p->ctx->lock);
+
 	/* skip guilty context job */
 	if (atomic_read(&p->ctx->guilty) == 1) {
 		ret = -ECANCELED;
 		goto free_chunk;
 	}
 
-	mutex_lock(&p->ctx->lock);
-
 	/* get chunks */
 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,

@@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	/* Vega 12 */
 	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	/* Raven */

@@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x6864, &vega10_device_info },	/* Vega10 */
 	{ 0x6867, &vega10_device_info },	/* Vega10 */
 	{ 0x6868, &vega10_device_info },	/* Vega10 */
+	{ 0x6869, &vega10_device_info },	/* Vega10 */
+	{ 0x686A, &vega10_device_info },	/* Vega10 */
+	{ 0x686B, &vega10_device_info },	/* Vega10 */
 	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf*/
+	{ 0x686D, &vega10_device_info },	/* Vega10 */
+	{ 0x686E, &vega10_device_info },	/* Vega10 */
+	{ 0x686F, &vega10_device_info },	/* Vega10 */
 	{ 0x687F, &vega10_device_info },	/* Vega10 */
 	{ 0x66a0, &vega20_device_info },	/* Vega20 */
 	{ 0x66a1, &vega20_device_info },	/* Vega20 */
 	{ 0x66a2, &vega20_device_info },	/* Vega20 */
 	{ 0x66a3, &vega20_device_info },	/* Vega20 */
+	{ 0x66a4, &vega20_device_info },	/* Vega20 */
 	{ 0x66a7, &vega20_device_info },	/* Vega20 */
 	{ 0x66af, &vega20_device_info }	/* Vega20 */
 };

@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.disable_auto_wattman = 1;
 	data->registry_data.auto_wattman_debug = 0;
 	data->registry_data.auto_wattman_sample_period = 100;
-	data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+	data->registry_data.fclk_gfxclk_ratio = 0;
 	data->registry_data.auto_wattman_threshold = 50;
 	data->registry_data.gfxoff_controlled_by_driver = 1;
 	data->gfxoff_allowed = false;

@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
 #define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
 
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage      ((uint16_t) 0x415)
+
 #define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
 #define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
 #define PPSMC_MSG_GetCurrPkgPwr              ((uint16_t) 0x282)

@@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
 
+	/* Apply avfs cks-off voltages to avoid the overshoot
+	 * when switching to the highest sclk frequency
+	 */
+	if (data->apply_avfs_cks_off_voltage)
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
 	return 0;
 }
 

@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");

@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		plane->bpp = skl_pixel_formats[fmt].bpp;
 		plane->drm_format = skl_pixel_formats[fmt].drm_format;
 	} else {
-		plane->tiled = !!(val & DISPPLANE_TILED);
+		plane->tiled = val & DISPPLANE_TILED;
 		fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
 		plane->bpp = bdw_pixel_formats[fmt].bpp;
 		plane->drm_format = bdw_pixel_formats[fmt].drm_format;

@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_uncore_sanitize(dev_priv);
 
+	intel_gt_init_workarounds(dev_priv);
 	i915_gem_load_init_fences(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the

@@ -67,6 +67,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -1805,6 +1806,7 @@
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	struct i915_workarounds workarounds;
+	struct i915_wa_list gt_wa_list;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -2148,6 +2150,8 @@
 		struct delayed_work idle_work;
 
 		ktime_t last_init_time;
+
+		struct i915_vma *scratch;
 	} gt;
 
 	/* perform PHY state sanity checks? */
@@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 	return I915_HWS_CSB_WRITE_INDEX;
 }
 
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+	return i915_ggtt_offset(i915->gt.scratch);
+}
+
 #endif

@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	intel_gt_workarounds_apply(dev_priv);
+	intel_gt_apply_workarounds(dev_priv);
 
 	i915_gem_init_swizzling(dev_priv);
 
@@ -5500,6 +5500,44 @@ err_active:
 	goto out_ctx;
 }
 
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	i915->gt.scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
@@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_init_scratch(dev_priv,
+				    IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}
 
+	ret = i915_gem_contexts_init(dev_priv);
+	if (ret) {
+		GEM_BUG_ON(ret == -EIO);
+		goto err_scratch;
+	}
+
 	ret = intel_engines_init(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5669,8 @@ err_pm:
 err_context:
 	if (ret != -EIO)
 		i915_gem_contexts_fini(dev_priv);
+err_scratch:
+	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
+	i915_gem_fini_scratch(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
 	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_uc_fini_misc(dev_priv);

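The i915_gem.c changes above fold the per-engine scratch pages into a single device-wide scratch allocation reached through i915_scratch_offset(). A toy C sketch of the same consolidation pattern, with invented names (not the kernel types):

#include <stdio.h>

struct device_state {
	unsigned int scratch_offset;	/* stands in for i915->gt.scratch */
};

struct engine {
	struct device_state *dev;	/* engines share the device buffer */
};

/* analogue of i915_scratch_offset(): every user goes through the device */
static inline unsigned int scratch_offset(const struct device_state *dev)
{
	return dev->scratch_offset;
}

int main(void)
{
	struct device_state dev = { .scratch_offset = 0x1000 };
	struct engine rcs = { .dev = &dev }, bcs = { .dev = &dev };

	/* both engines see the same scratch address, allocated exactly once */
	printf("%#x %#x\n", scratch_offset(rcs.dev), scratch_offset(bcs.dev));
	return 0;
}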
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
 	else if (gen >= 4)
 		len = 4;
 	else
-		len = 6;
+		len = 3;
 
 	batch = reloc_gpu(eb, vma, len);
 	if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
 		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*batch++ = addr;
 		*batch++ = target_offset;
-
-		/* And again for good measure (blb/pnv) */
-		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-		*batch++ = addr;
-		*batch++ = target_offset;
 	}
 
 	goto out;

@@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			if (HAS_BROKEN_CS_TLB(i915))
 				ee->wa_batchbuffer =
 					i915_error_object_create(i915,
-								 engine->scratch);
+								 i915->gt.scratch);
 			request_record_user_bo(request, ee);
 
 			ee->ctx =

@@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_cmd_parser(engine);
 }
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-				unsigned int size)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int ret;
-
-	WARN_ON(engine->scratch);
-
-	obj = i915_gem_object_create_stolen(engine->i915, size);
-	if (!obj)
-		obj = i915_gem_object_create_internal(engine->i915, size);
-	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate scratch page\n");
-		return PTR_ERR(obj);
-	}
-
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_unref;
-	}
-
-	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-	if (ret)
-		goto err_unref;
-
-	engine->scratch = vma;
-	return 0;
-
-err_unref:
-	i915_gem_object_put(obj);
-	return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
-	i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
 	if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
 
-	intel_engine_cleanup_scratch(engine);
-
 	cleanup_status_page(engine);
 
 	intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	__intel_context_unpin(i915->kernel_context, engine);
 
 	i915_timeline_fini(&engine->timeline);
+
+	intel_wa_list_free(&engine->wa_list);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)

@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * may not be visible to the HW prior to the completion of the UC
 	 * register write and that we may begin execution from the context
 	 * before its image is complete leading to invalid PD chasing.
+	 *
+	 * Furthermore, Braswell, at least, wants a full mb to be sure that
+	 * the writes are coherent in memory (visible to the GPU) prior to
+	 * execution, and not just visible to other CPUs (as is the result of
+	 * wmb).
 	 */
-	wmb();
+	mb();
 	return ce->lrc_desc;
 }
 
@@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
 static u32 *
 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 {
+	/* NB no one else is allowed to scribble over scratch + 256! */
 	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_ggtt_offset(engine->scratch) + 256;
+	*batch++ = i915_scratch_offset(engine->i915) + 256;
 	*batch++ = 0;
 
 	*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_ggtt_offset(engine->scratch) + 256;
+	*batch++ = i915_scratch_offset(engine->i915) + 256;
 	*batch++ = 0;
 
 	return batch;
@@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 				       PIPE_CONTROL_GLOBAL_GTT_IVB |
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_QW_WRITE,
-				       i915_ggtt_offset(engine->scratch) +
+				       i915_scratch_offset(engine->i915) +
 				       2 * CACHELINE_BYTES);
 
 	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 					    PIPE_CONTROL_GLOBAL_GTT_IVB |
 					    PIPE_CONTROL_CS_STALL |
 					    PIPE_CONTROL_QW_WRITE,
-					    i915_ggtt_offset(engine->scratch)
+					    i915_scratch_offset(engine->i915)
 					    + 2 * CACHELINE_BYTES);
 	}
 
@@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
+	intel_engine_apply_workarounds(engine);
+
 	intel_mocs_init_engine(engine);
 
 	intel_engine_reset_breadcrumbs(engine);
@@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
-		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+		i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 *cs, flags = 0;
 	int len;
@@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-	if (ret)
-		goto err_cleanup_common;
-
 	ret = intel_init_workaround_bb(engine);
 	if (ret) {
 		/*
@@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 			  ret);
 	}
 
-	return 0;
+	intel_engine_init_workarounds(engine);
 
-err_cleanup_common:
-	intel_engine_cleanup_common(engine);
-	return ret;
+	return 0;
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)

@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
 {
+	unsigned int num_store_dw;
 	u32 cmd, *cs;
 
 	cmd = MI_FLUSH;
+	num_store_dw = 0;
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
+	if (mode & EMIT_FLUSH)
+		num_store_dw = 4;
 
-	cs = intel_ring_begin(rq, 2);
+	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
 	*cs++ = cmd;
-	*cs++ = MI_NOOP;
+	while (num_store_dw--) {
+		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+		*cs++ = i915_scratch_offset(rq->i915);
+		*cs++ = 0;
+	}
+	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
 
 	intel_ring_advance(rq, cs);
 
 	return 0;
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE) {
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_ggtt_offset(rq->engine->scratch) |
-			PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 		*cs++ = MI_FLUSH;
 
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_ggtt_offset(rq->engine->scratch) |
-			PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 	}
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-	u32 scratch_addr =
-		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr =
-		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs, flags = 0;
 	int ret;
 
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr =
-		i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs, flags = 0;
 
 	/*
@@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
 }
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
@@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
 
 	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
@@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct i915_timeline *timeline;
 	struct intel_ring *ring;
-	unsigned int size;
 	int err;
 
 	intel_engine_setup_common(engine);
@@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	GEM_BUG_ON(engine->buffer);
 	engine->buffer = ring;
 
-	size = PAGE_SIZE;
-	if (HAS_BROKEN_CS_TLB(engine->i915))
-		size = I830_WA_SIZE;
-	err = intel_engine_create_scratch(engine, size);
+	err = intel_engine_init_common(engine);
 	if (err)
 		goto err_unpin;
 
-	err = intel_engine_init_common(engine);
-	if (err)
-		goto err_scratch;
-
 	return 0;
 
-err_scratch:
-	intel_engine_cleanup_scratch(engine);
 err_unpin:
 	intel_ring_unpin(ring);
 err_ring:
@@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
 	/* Stall until the page table load is complete */
 	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-	*cs++ = i915_ggtt_offset(engine->scratch);
+	*cs++ = i915_scratch_offset(rq->i915);
 	*cs++ = MI_NOOP;
 
 	intel_ring_advance(rq, cs);
@@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 		/* Insert a delay before the next switch! */
 		*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 		*cs++ = i915_mmio_reg_offset(last_reg);
-		*cs++ = i915_ggtt_offset(engine->scratch);
+		*cs++ = i915_scratch_offset(rq->i915);
 		*cs++ = MI_NOOP;
 	}
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

@@ -15,6 +15,7 @@
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 #include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
 
 struct drm_printer;
 struct i915_sched_attr;
@@ -440,7 +441,7 @@ struct intel_engine_cs {
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
-	struct i915_vma *scratch;
+	struct i915_wa_list wa_list;
 
 	u32 irq_keep_mask; /* always keep these interrupts */
 	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-				unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);

@@ -48,6 +48,20 @@
  * - Public functions to init or apply the given workaround type.
  */
 
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+	wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+	if (!wal->count)
+		return;
+
+	DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+			 wal->count, wal->name);
+}
+
 static void wa_add(struct drm_i915_private *i915,
 		   i915_reg_t reg, const u32 mask, const u32 val)
 {
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 	return 0;
 }
 
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 {
+	const unsigned int grow = 1 << 4;
+
+	GEM_BUG_ON(!is_power_of_2(grow));
+
+	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+		struct i915_wa *list;
+
+		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+				     GFP_KERNEL);
+		if (!list) {
+			DRM_ERROR("No space for workaround init!\n");
+			return;
+		}
+
+		if (wal->list)
+			memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+		wal->list = list;
+	}
+
+	wal->list[wal->count++] = *wa;
 }
 
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = val,
+		.val = _MASKED_BIT_ENABLE(val)
+	};
+
+	wal_add(wal, &wa);
 }
 
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+		   u32 val)
 {
-	/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = mask,
+		.val = val
+	};
 
-	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+	wal_add(wal, &wa);
+}
+
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+	wa_write_masked_or(wal, reg, ~0, val);
+}
+
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+	wa_write_masked_or(wal, reg, val, val);
+}
+
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
 	/* WaDisableKillLogic:bxt,skl,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   ECOCHK_DIS_TLB);
+	if (!IS_COFFEELAKE(i915))
+		wa_write_or(wal,
+			    GAM_ECOCHK,
+			    ECOCHK_DIS_TLB);
 
-	if (HAS_LLC(dev_priv)) {
+	if (HAS_LLC(i915)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
 		 * Must match Display Engine. See
 		 * WaCompressedResourceDisplayNewHashMode.
 		 */
-		I915_WRITE(MMCD_MISC_CTRL,
-			   I915_READ(MMCD_MISC_CTRL) |
-			   MMCD_PCLA |
-			   MMCD_HOTSPOT_EN);
+		wa_write_or(wal,
+			    MMCD_MISC_CTRL,
+			    MMCD_PCLA | MMCD_HOTSPOT_EN);
 	}
 
 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   BDW_DISABLE_HDC_INVALIDATION);
-
-	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 val = I915_READ(GEN8_L3SQCREG1);
-
-		val &= ~L3_PRIO_CREDITS_MASK;
-		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-		I915_WRITE(GEN8_L3SQCREG1, val);
-	}
-
-	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_write_or(wal,
+		    GAM_ECOCHK,
+		    BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:skl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableGafsUnitClkGating:skl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+		wa_write_or(wal,
+			    GEN9_GAMT_ECO_REG_RW_IA,
+			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaDisablePooledEuLoadBalancingFix:bxt */
-	I915_WRITE(FF_SLICE_CS_CHICKEN2,
-		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+	gen9_gt_workarounds_init(i915);
 
 	/* WaInPlaceDecompressionHang:bxt */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:kbl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
 
 	/* WaDisableGafsUnitClkGating:kbl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:kbl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaKBLVECSSemaphoreWaitPoll:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
-		struct intel_engine_cs *engine;
-		unsigned int tmp;
-
-		for_each_engine(engine, dev_priv, tmp) {
-			if (engine->id == RCS)
-				continue;
-
-			I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
-		}
-	}
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	gen9_gt_workarounds_init(i915);
 }
 
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:cfl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableGafsUnitClkGating:cfl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:cfl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
 	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-	u32 mcr;
+	struct i915_wa_list *wal = &dev_priv->gt_wa_list;
 	u32 mcr_slice_subslice_mask;
 
 	/*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 		WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
 	}
 
-	mcr = I915_READ(GEN8_MCR_SELECTOR);
-
 	if (INTEL_GEN(dev_priv) >= 11)
 		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
 					  GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 	 * occasions, such as INSTDONE, where this value is dependent
 	 * on s/ss combo, the read should be done with read_subslice_reg.
 	 */
-	mcr &= ~mcr_slice_subslice_mask;
-	mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
-	I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+	wa_write_masked_or(wal,
+			   GEN8_MCR_SELECTOR,
+			   mcr_slice_subslice_mask,
+			   intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_mcr(i915);
 
 	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
 
 	/* WaInPlaceDecompressionHang:cnl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* This is not an Wa. Enable for better image quality */
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+	wa_init_mcr(i915);
 
 	/* WaInPlaceDecompressionHang:icl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaPipelineFlushCoherentLines:icl */
-	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-		   GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-	/* Wa_1405543622:icl
-	 * Formerly known as WaGAPZPriorityScheme
-	 */
-	I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
-		   GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
-	/* Wa_1604223664:icl
-	 * Formerly known as WaL3BankAddressHashing
-	 */
-	I915_WRITE(GEN8_GARBCNTL,
-		   (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
-		   GEN11_HASH_CTRL_EXCL_BIT0);
-	I915_WRITE(GEN11_GLBLINVL,
-		   (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
-		   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaModifyGamTlbPartitioning:icl */
-	I915_WRITE(GEN11_GACB_PERF_CTRL,
-		   (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
-		   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
-	/* Wa_1405733216:icl
-	 * Formerly known as WaDisableCleanEvicts
-	 */
-	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-		   GEN11_LQSC_CLEAN_EVICT_DISABLE);
+	wa_write_masked_or(wal,
+			   GEN11_GACB_PERF_CTRL,
+			   GEN11_HASH_CTRL_MASK,
+			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
 	/* Wa_1405766107:icl
 	 * Formerly known as WaCL2SFHalfMaxAlloc
 	 */
-	I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
-		   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-		   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+	wa_write_or(wal,
+		    GEN11_LSN_UNSLCVC,
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
 	/* Wa_220166154:icl
 	 * Formerly known as WaDisCtxReload
 	 */
-	I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
-		   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GAMW_ECO_DEV_RW_IA,
+		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
 	/* Wa_1405779004:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
-		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-			   MSCUNIT_CLKGATE_DIS);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+		wa_write_or(wal,
+			    SLICE_UNIT_LEVEL_CLKGATE,
+			    MSCUNIT_CLKGATE_DIS);
 
 	/* Wa_1406680159:icl */
-	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
-		   I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
-		   GWUNIT_CLKGATE_DIS);
-
-	/* Wa_1604302699:icl */
-	I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
-		   I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
-		   GEN11_I2M_WRITE_DISABLE);
+	wa_write_or(wal,
+		    SUBSLICE_UNIT_LEVEL_CLKGATE,
+		    GWUNIT_CLKGATE_DIS);
 
 	/* Wa_1406838659:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-		I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
-			   I915_READ(INF_UNIT_LEVEL_CLKGATE) |
-			   CGPSF_CLKGATE_DIS);
-
-	/* WaForwardProgressSoftReset:icl */
-	I915_WRITE(GEN10_SCRATCH_LNCF2,
-		   I915_READ(GEN10_SCRATCH_LNCF2) |
-		   PMFLUSHDONE_LNICRSDROP |
-		   PMFLUSH_GAPL3UNBLOCK |
-		   PMFLUSHDONE_LNEBLK);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+		wa_write_or(wal,
+			    INF_UNIT_LEVEL_CLKGATE,
+			    CGPSF_CLKGATE_DIS);
 
 	/* Wa_1406463099:icl
 	 * Formerly known as WaGamTlbPendError
 	 */
-	I915_WRITE(GAMT_CHKN_BIT_REG,
-		   I915_READ(GAMT_CHKN_BIT_REG) |
-		   GAMT_CHKN_DISABLE_L3_COH_PIPE);
+	wa_write_or(wal,
+		    GAMT_CHKN_BIT_REG,
+		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
 {
-	if (INTEL_GEN(dev_priv) < 8)
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_start(wal, "GT");
+
+	if (INTEL_GEN(i915) < 8)
 		return;
-	else if (IS_BROADWELL(dev_priv))
-		bdw_gt_workarounds_apply(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		chv_gt_workarounds_apply(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		skl_gt_workarounds_apply(dev_priv);
-	else if (IS_BROXTON(dev_priv))
-		bxt_gt_workarounds_apply(dev_priv);
-	else if (IS_KABYLAKE(dev_priv))
-		kbl_gt_workarounds_apply(dev_priv);
-	else if (IS_GEMINILAKE(dev_priv))
-		glk_gt_workarounds_apply(dev_priv);
-	else if (IS_COFFEELAKE(dev_priv))
-		cfl_gt_workarounds_apply(dev_priv);
-	else if (IS_CANNONLAKE(dev_priv))
-		cnl_gt_workarounds_apply(dev_priv);
-	else if (IS_ICELAKE(dev_priv))
-		icl_gt_workarounds_apply(dev_priv);
+	else if (IS_BROADWELL(i915))
+		return;
+	else if (IS_CHERRYVIEW(i915))
+		return;
+	else if (IS_SKYLAKE(i915))
+		skl_gt_workarounds_init(i915);
+	else if (IS_BROXTON(i915))
+		bxt_gt_workarounds_init(i915);
+	else if (IS_KABYLAKE(i915))
+		kbl_gt_workarounds_init(i915);
+	else if (IS_GEMINILAKE(i915))
+		glk_gt_workarounds_init(i915);
+	else if (IS_COFFEELAKE(i915))
+		cfl_gt_workarounds_init(i915);
+	else if (IS_CANNONLAKE(i915))
+		cnl_gt_workarounds_init(i915);
+	else if (IS_ICELAKE(i915))
+		icl_gt_workarounds_init(i915);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
+		MISSING_CASE(INTEL_GEN(i915));
+
+	wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+		   const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw = 0;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		fw |= intel_uncore_forcewake_for_reg(dev_priv,
+						     wa->reg,
+						     FW_REG_READ |
+						     FW_REG_WRITE);
+
+	return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw;
+	unsigned long flags;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	if (!wal->count)
+		return;
+
+	fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		u32 val = I915_READ_FW(wa->reg);
+
+		val &= ~wa->mask;
+		val |= wa->val;
+
+		I915_WRITE_FW(wa->reg, val);
+	}
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+	wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
 struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
 	whitelist_apply(engine, whitelist_build(engine, &w));
 }
 
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	if (IS_ICELAKE(i915)) {
+		/* This is not an Wa. Enable for better image quality */
+		wa_masked_en(wal,
+			     _3D_CHICKEN3,
+			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+		/* WaPipelineFlushCoherentLines:icl */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+		/*
+		 * Wa_1405543622:icl
+		 * Formerly known as WaGAPZPriorityScheme
+		 */
+		wa_write_or(wal,
+			    GEN8_GARBCNTL,
+			    GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+		/*
+		 * Wa_1604223664:icl
+		 * Formerly known as WaL3BankAddressHashing
+		 */
+		wa_write_masked_or(wal,
+				   GEN8_GARBCNTL,
+				   GEN11_HASH_CTRL_EXCL_MASK,
+				   GEN11_HASH_CTRL_EXCL_BIT0);
+		wa_write_masked_or(wal,
+				   GEN11_GLBLINVL,
+				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
+				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+		/*
+		 * Wa_1405733216:icl
+		 * Formerly known as WaDisableCleanEvicts
+		 */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+		/* Wa_1604302699:icl */
+		wa_write_or(wal,
+			    GEN10_L3_CHICKEN_MODE_REGISTER,
+			    GEN11_I2M_WRITE_DISABLE);
+
+		/* WaForwardProgressSoftReset:icl */
+		wa_write_or(wal,
+			    GEN10_SCRATCH_LNCF2,
+			    PMFLUSHDONE_LNICRSDROP |
+			    PMFLUSH_GAPL3UNBLOCK |
+			    PMFLUSHDONE_LNEBLK);
+	}
+
+	if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+		/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+		wa_masked_en(wal,
+			     GEN7_FF_SLICE_CS_CHICKEN1,
+			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+	}
+
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+		wa_write_or(wal,
+			    GEN8_GARBCNTL,
+			    GEN9_GAPS_TSV_CREDIT_DISABLE);
+	}
+
+	if (IS_BROXTON(i915)) {
+		/* WaDisablePooledEuLoadBalancingFix:bxt */
+		wa_masked_en(wal,
+			     FF_SLICE_CS_CHICKEN2,
+			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+	}
+
+	if (IS_GEN9(i915)) {
+		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+		wa_masked_en(wal,
+			     GEN9_CSFE_CHICKEN1_RCS,
+			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+		wa_write_or(wal,
+			    BDW_SCRATCH1,
+			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+		if (IS_GEN9_LP(i915))
+			wa_write_masked_or(wal,
+					   GEN8_L3SQCREG1,
+					   L3_PRIO_CREDITS_MASK,
+					   L3_GENERAL_PRIO_CREDITS(62) |
+					   L3_HIGH_PRIO_CREDITS(2));
+
+		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+	}
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	/* WaKBLVECSSemaphoreWaitPoll:kbl */
+	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+		wa_write(wal,
+			 RING_SEMA_WAIT_POLL(engine->mmio_base),
+			 1);
+	}
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+		return;
+
+	wa_init_start(wal, engine->name);
+
+	if (engine->id == RCS)
+		rcs_engine_wa_init(engine);
+	else
+		xcs_engine_wa_init(engine);
+
+	wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+	wa_list_apply(engine->i915, &engine->wa_list);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/intel_workarounds.c"
 #endif

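wa_list_apply() above batches its read-modify-writes: it first computes the union of forcewake domains the registers need, wakes them once, and performs every write under a single uncore lock. A hedged stand-alone C sketch of that batching idea, with an invented register-to-domain mapping (not the kernel API):

#include <stdio.h>

struct wa { unsigned int reg, mask, val; };

static unsigned int domains_for_reg(unsigned int reg)
{
	return 1u << (reg >> 4);	/* fake mapping from register to domain */
}

static void apply_all(unsigned int *regs, const struct wa *list,
		      unsigned int count)
{
	unsigned int fw = 0, i;

	for (i = 0; i < count; i++)	/* union of required domains */
		fw |= domains_for_reg(list[i].reg);

	/* forcewake_get(fw); lock();  -- taken once for the whole list */
	for (i = 0; i < count; i++) {
		unsigned int v = regs[list[i].reg];
		regs[list[i].reg] = (v & ~list[i].mask) | list[i].val;
	}
	/* unlock(); forcewake_put(fw); */
	printf("applied %u workarounds using domain mask %#x\n", count, fw);
}

int main(void)
{
	unsigned int regs[64] = {0};
	const struct wa list[] = {
		{ .reg = 0x10, .mask = 0xff, .val = 0x2a },
		{ .reg = 0x21, .mask = 0x0f, .val = 0x05 },
	};

	apply_all(regs, list, 2);
	return 0;
}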
@@ -7,11 +7,35 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
+#include <linux/slab.h>
+
+struct i915_wa {
+	i915_reg_t	  reg;
+	u32		  mask;
+	u32		  val;
+};
+
+struct i915_wa_list {
+	const char	*name;
+	struct i915_wa	*list;
+	unsigned int	count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+	kfree(wal->list);
+	memset(wal, 0, sizeof(*wal));
+}
+
 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
 int intel_ctx_workarounds_emit(struct i915_request *rq);
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
 
 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+
 #endif

@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
 	dsi->encoder.possible_crtcs = 1;
 
 	/* If there's a bridge, attach to it and let it create the connector */
-	ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
-	if (ret) {
-		DRM_ERROR("Failed to attach bridge to drm\n");
-
+	if (dsi->bridge) {
+		ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+		if (ret) {
+			DRM_ERROR("Failed to attach bridge to drm\n");
+			goto err_encoder_cleanup;
+		}
+	} else {
 		/* Otherwise create our own connector and attach to a panel */
 		ret = mtk_dsi_create_connector(drm, dsi);
 		if (ret)

@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
|
|||
/******************************************************************************
|
||||
* EVO channel helpers
|
||||
*****************************************************************************/
|
||||
static void
|
||||
evo_flush(struct nv50_dmac *dmac)
|
||||
{
|
||||
/* Push buffer fetches are not coherent with BAR1, we need to ensure
|
||||
* writes have been flushed right through to VRAM before writing PUT.
|
||||
*/
|
||||
if (dmac->push.type & NVIF_MEM_VRAM) {
|
||||
struct nvif_device *device = dmac->base.device;
|
||||
nvif_wr32(&device->object, 0x070000, 0x00000001);
|
||||
nvif_msec(device, 2000,
|
||||
if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
|
||||
break;
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
u32 *
|
||||
evo_wait(struct nv50_dmac *evoc, int nr)
|
||||
{
|
||||
|
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
 	mutex_lock(&dmac->lock);
 	if (put + nr >= (PAGE_SIZE / 4) - 8) {
 		dmac->ptr[put] = 0x20000000;
+		evo_flush(dmac);
 
 		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
 		if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
 {
 	struct nv50_dmac *dmac = evoc;
 
-	/* Push buffer fetches are not coherent with BAR1, we need to ensure
-	 * writes have been flushed right through to VRAM before writing PUT.
-	 */
-	if (dmac->push.type & NVIF_MEM_VRAM) {
-		struct nvif_device *device = dmac->base.device;
-		nvif_wr32(&device->object, 0x070000, 0x00000001);
-		nvif_msec(device, 2000,
-			if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
-				break;
-		);
-	}
+	evo_flush(dmac);
 
 	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
 	mutex_unlock(&dmac->lock);
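The two nouveau hunks above factor the duplicated VRAM write-flush into evo_flush() so that both evo_wait() (when rewinding the push buffer) and evo_kick() flush before PUT is updated. The flush itself is a write-then-poll-with-timeout loop, the nvif_msec() construct. Here is a standalone sketch of that polling idiom, with a hypothetical hw_flush_done() standing in for the status-register read:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for reading the hardware flush-status register. */
static bool hw_flush_done(void)
{
	static int calls;

	return ++calls > 3;	/* pretend the flush completes on the 4th poll */
}

/* Poll a condition for up to timeout_ms milliseconds; returns true on
 * success, false on timeout. Same shape as the nvif_msec() busy-wait. */
static bool poll_until(bool (*done)(void), long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (done())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms >= timeout_ms)
			return false;
	}
}

int main(void)
{
	/* kick the flush, then wait at most 2000 ms, as the driver does */
	if (poll_until(hw_flush_done, 2000))
		puts("flush completed");
	else
		puts("flush timed out");
	return 0;
}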
@@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
 {
 	struct nv50_mstm *mstm = *pmstm;
 	if (mstm) {
+		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
 		kfree(*pmstm);
 		*pmstm = NULL;
 	}
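The one-line nv50_mstm_del() fix is purely about teardown order: drm_dp_mst_topology_mgr_destroy() releases allocations owned by the embedded manager, so it must run before the containing object is freed or those allocations leak. A generic sketch of the destroy-before-free rule; all types here are invented for illustration:

#include <stdlib.h>

/* An object that owns an internally allocated resource. */
struct mgr {
	char *scratch;		/* allocated in mgr_init() */
};

static void mgr_init(struct mgr *m)
{
	m->scratch = malloc(256);
}

static void mgr_destroy(struct mgr *m)
{
	free(m->scratch);	/* release what init allocated */
	m->scratch = NULL;
}

struct outer {
	struct mgr mgr;		/* embedded, like mstm->mgr */
};

static void outer_del(struct outer **pouter)
{
	struct outer *outer = *pouter;

	if (outer) {
		mgr_destroy(&outer->mgr);	/* without this, scratch leaks */
		free(*pouter);
		*pouter = NULL;			/* poison the caller's pointer */
	}
}

int main(void)
{
	struct outer *o = calloc(1, sizeof(*o));

	mgr_init(&o->mgr);
	outer_del(&o);		/* o is NULL afterwards */
	return 0;
}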
@@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
 		goto err_free;
 	}
 
+	err = nouveau_drm_device_init(drm);
+	if (err)
+		goto err_put;
+
 	platform_set_drvdata(pdev, drm);
 
 	return drm;
 
+err_put:
+	drm_dev_put(drm);
 err_free:
 	nvkm_device_del(pdevice);
 
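The tegra hunk also shows the kernel's goto-unwind convention: each acquired resource gets an error label, a failure jumps to the label that releases everything acquired so far, and a new acquisition (nouveau_drm_device_init() here) adds a new label (err_put) above the existing ones. A compact userspace version of the same ladder:

#include <stdio.h>
#include <stdlib.h>

static int create(void)
{
	void *a, *b, *c;
	int err = -1;

	a = malloc(16);
	if (!a)
		goto err_out;

	b = malloc(16);
	if (!b)
		goto err_free_a;	/* only a exists so far */

	c = malloc(16);
	if (!c)
		goto err_free_b;	/* a and b exist; unwind both */

	printf("all resources acquired\n");
	free(c);			/* success path: normal release */
	free(b);
	free(a);
	return 0;

err_free_b:				/* labels release in reverse order */
	free(b);
err_free_a:
	free(a);
err_out:
	return err;
}

int main(void)
{
	return create() ? EXIT_FAILURE : EXIT_SUCCESS;
}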
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
-	rockchip_drm_platform_remove(pdev);
-}
-
 static const struct of_device_id rockchip_drm_dt_ids[] = {
 	{ .compatible = "rockchip,display-subsystem", },
 	{ /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 static struct platform_driver rockchip_drm_platform_driver = {
 	.probe = rockchip_drm_platform_probe,
 	.remove = rockchip_drm_platform_remove,
-	.shutdown = rockchip_drm_platform_shutdown,
 	.driver = {
 		.name = "rockchip-drm",
 		.of_match_table = rockchip_drm_dt_ids,
@@ -49,6 +49,8 @@
 
 #define VMWGFX_REPO "In Tree"
 
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 
 /**
  * Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		spin_unlock(&dev_priv->cap_lock);
 	}
 
-
+	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
@@ -606,6 +606,9 @@ struct vmw_private {
 
 	struct vmw_cmdbuf_man *cman;
 	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+	/* Validation memory reservation */
+	struct vmw_validation_mem vvm;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+					size_t gran);
 /**
  * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 				      void *buf)
 {
 	struct vmw_buffer_object *vmw_bo;
-	int ret;
 
 	struct {
 		uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
-	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
-				      &cmd->body.ptr,
-				      &vmw_bo);
-	return ret;
+	return vmw_translate_guest_ptr(dev_priv, sw_context,
+				       &cmd->body.ptr,
+				       &vmw_bo);
 }
 
 
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	struct sync_file *sync_file = NULL;
 	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
+	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
 		if (out_fence_fd < 0) {
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
 	drm_global_item_unref(&dev_priv->bo_global_ref.ref);
 	drm_global_item_unref(&dev_priv->mem_global_ref);
 }
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+	static struct ttm_operation_ctx ctx = {.interruptible = false,
+					       .no_wait_gpu = false};
+	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+	return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+	return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure assumption in the
+ * callbacks that struct vmw_private derives from struct vmw_validation_mem
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+	struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+	vvm->reserve_mem = vmw_vmt_reserve;
+	vvm->unreserve_mem = vmw_vmt_unreserve;
+	vvm->gran = gran;
+}
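The two callbacks above recover the enclosing vmw_private from the embedded vmw_validation_mem with container_of(), the usual kernel idiom for an interface struct embedded in a larger object. A self-contained userspace sketch of that embed-and-recover pattern; the reserve logic is a plain counter here rather than TTM's accounting:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct validation_mem {
	int (*reserve_mem)(struct validation_mem *m, size_t size);
	void (*unreserve_mem)(struct validation_mem *m, size_t size);
	size_t gran;
};

/* The "driver private" that embeds the interface object. */
struct private {
	size_t accounted;	/* stand-in for the TTM global accounting */
	struct validation_mem vvm;
};

static int my_reserve(struct validation_mem *m, size_t size)
{
	struct private *priv = container_of(m, struct private, vvm);

	priv->accounted += size;
	return 0;
}

static void my_unreserve(struct validation_mem *m, size_t size)
{
	struct private *priv = container_of(m, struct private, vvm);

	priv->accounted -= size;
}

int main(void)
{
	struct private priv = { .vvm = {
		.reserve_mem = my_reserve,
		.unreserve_mem = my_unreserve,
		.gran = 16 * 4096,
	} };

	priv.vvm.reserve_mem(&priv.vvm, priv.vvm.gran);
	printf("accounted: %zu bytes\n", priv.accounted);
	priv.vvm.unreserve_mem(&priv.vvm, priv.vvm.gran);
	return 0;
}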
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
 		return NULL;
 
 	if (ctx->mem_size_left < size) {
-		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		struct page *page;
 
+		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+			if (ret)
+				return NULL;
+
+			ctx->vm_size_left += ctx->vm->gran;
+			ctx->total_mem += ctx->vm->gran;
+		}
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 		if (!page)
 			return NULL;
 
+		if (ctx->vm)
+			ctx->vm_size_left -= PAGE_SIZE;
+
 		list_add_tail(&page->lru, &ctx->page_list);
 		ctx->page_address = page_address(page);
 		ctx->mem_size_left = PAGE_SIZE;
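This allocator change is the core of the OOM fix: before grabbing another page, the context tops up a coarse reservation (gran bytes at a time) through the callbacks, so the accounting layer can refuse early instead of letting a runaway execbuf exhaust kernel memory page by page. A userspace sketch of the two-level accounting, assuming a fixed budget in place of TTM's global limit:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ	4096UL
#define GRAN	(16 * PAGE_SZ)	/* reservation granularity */
#define BUDGET	(64 * PAGE_SZ)	/* stand-in for the global limit */

static size_t budget_used;

/* Fails once the global budget is exhausted: the "refuse early" path. */
static int reserve(size_t size)
{
	if (budget_used + size > BUDGET)
		return -1;
	budget_used += size;
	return 0;
}

struct ctx {
	size_t vm_size_left;	/* reserved but not yet handed out */
	size_t total_mem;	/* everything reserved by this context */
};

/* Hand out one page, topping up the coarse reservation when needed. */
static void *ctx_alloc_page(struct ctx *ctx)
{
	if (ctx->vm_size_left < PAGE_SZ) {
		if (reserve(GRAN))
			return NULL;
		ctx->vm_size_left += GRAN;
		ctx->total_mem += GRAN;
	}
	ctx->vm_size_left -= PAGE_SZ;
	return calloc(1, PAGE_SZ);
}

int main(void)
{
	struct ctx ctx = { 0 };
	unsigned int pages = 0;
	void *p;

	while ((p = ctx_alloc_page(&ctx)))
		pages++, free(p);	/* freed at once; accounting is the point */

	printf("allocated %u pages before the budget said no\n", pages);
	printf("context had reserved %zu bytes in %lu-byte granules\n",
	       ctx.total_mem, GRAN);
	return 0;
}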
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
 	}
 
 	ctx->mem_size_left = 0;
+	if (ctx->vm && ctx->total_mem) {
+		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+		ctx->total_mem = 0;
+		ctx->vm_size_left = 0;
+	}
 }
 
 /**
@@ -33,6 +33,21 @@
 #include <linux/ww_mutex.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
+/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+	int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+	void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+	size_t gran;
+};
+
 /**
  * struct vmw_validation_context - Per command submission validation context
  * @ht: Hash table used to find resource- or buffer object duplicates
@@ -47,6 +62,10 @@
  * buffer objects
  * @mem_size_left: Free memory left in the last page in @page_list
  * @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
  */
 struct vmw_validation_context {
 	struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
 	unsigned int merge_dups;
 	unsigned int mem_size_left;
 	u8 *page_address;
+	struct vmw_validation_mem *vm;
+	size_t vm_size_left;
+	size_t total_mem;
 };
 
 struct vmw_buffer_object;
@@ -101,6 +123,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
 	return !list_empty(&ctx->bo_list);
 }
 
+/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+			   struct vmw_validation_mem *vm)
+{
+	ctx->vm = vm;
+}
+
 /**
  * vmw_validation_set_ht - Register a hash table for duplicate finding
  * @ctx: The validation context