Merge tag 'drm-intel-gt-next-2022-09-16' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Cross-subsystem Changes:

- MEI subsystem pieces for XeHP SDV GSC support
  These are Acked-by Greg.

Driver Changes:

- Release mmaps on RPM suspend on discrete GPUs (Anshuman)
- Update GuC version to 7.5 on DG1, DG2 and ADL
- Revert "drm/i915/dg2: extend Wa_1409120013 to DG2" (Lucas)
- MTL enabling incl. standalone media (Matt R, Lucas)
- Explicitly clear BB_OFFSET for new contexts on Gen8+ (Chris)
- Fix throttling / perf limit reason decoding (Ashutosh)
- XeHP SDV GSC support (Vitaly, Alexander, Tomas)
- Fix issues with overriding firmware file paths (John)
- Invert if-else ladders to check latest version first (Lucas)
- Cancel GuC engine busyness worker synchronously (Umesh)
- Skip applying copy engine fuses outside PVC (Lucas)
- Eliminate Gen10 frequency read function (Lucas)
- Static code checker fixes (Gaosheng)
- Selftest improvements (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YyQ4Jgl3cpGL1/As@jlahtine-mobl.ger.corp.intel.com
commit 72ca70acc7
@@ -123,6 +123,7 @@ gt-y += \
 	gt/intel_ring.o \
 	gt/intel_ring_submission.o \
 	gt/intel_rps.o \
+	gt/intel_sa_media.o \
 	gt/intel_sseu.o \
 	gt/intel_sseu_debugfs.o \
 	gt/intel_timeline.o \

@ -12,8 +12,6 @@ struct drm_i915_private;
|
|||
struct drm_i915_gem_object;
|
||||
struct intel_memory_region;
|
||||
|
||||
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
|
||||
|
||||
void __iomem *
|
||||
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
|
||||
unsigned long n,
|
||||
|
|
|
@@ -413,7 +413,7 @@ retry:
 		vma->mmo = mmo;

 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
-		intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
+		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

 	if (write) {
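The wakeref touched above is the auto-suspend reference armed after a userspace fault; this merge moves it from the GGTT onto the GT so that lmem mmaps on discrete GPUs participate as well. Below is a minimal userspace sketch of the auto-releasing wakeref idea; the struct and helper names are invented for illustration and are not the i915 API.

```c
/* Standalone model of an auto-releasing wakeref (hypothetical types). */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct wakeref_auto {
	bool held;		/* device currently held awake */
	time_t timeout_end;	/* when the reference may be dropped */
};

/* Arm (or re-arm) the wakeref: take it if needed and push out the deadline. */
static void wakeref_auto_arm(struct wakeref_auto *wf, int timeout_sec)
{
	if (!wf->held) {
		wf->held = true;	/* runtime-PM "get" would happen here */
		printf("device held awake\n");
	}
	wf->timeout_end = time(NULL) + timeout_sec;
}

/* Called periodically (or with timeout 0 on suspend) to drop an expired hold. */
static void wakeref_auto_flush(struct wakeref_auto *wf, time_t now)
{
	if (wf->held && now >= wf->timeout_end) {
		wf->held = false;	/* runtime-PM "put" would happen here */
		printf("device allowed to suspend\n");
	}
}

int main(void)
{
	struct wakeref_auto wf = { 0 };

	wakeref_auto_arm(&wf, 2);			/* e.g. after serving a GGTT/lmem fault */
	wakeref_auto_flush(&wf, time(NULL));		/* too early: still held */
	wakeref_auto_flush(&wf, time(NULL) + 3);	/* past the deadline: released */
	return 0;
}
```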
@ -550,6 +550,20 @@ out:
|
|||
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
|
||||
}
|
||||
|
||||
void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
|
||||
struct ttm_device *bdev = bo->bdev;
|
||||
|
||||
drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
|
||||
|
||||
if (obj->userfault_count) {
|
||||
/* rpm wakeref provides exclusive access */
|
||||
list_del(&obj->userfault_link);
|
||||
obj->userfault_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct i915_mmap_offset *mmo, *mn;
|
||||
|
@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
|
|||
spin_lock(&obj->mmo.lock);
|
||||
}
|
||||
spin_unlock(&obj->mmo.lock);
|
||||
|
||||
if (obj->userfault_count) {
|
||||
mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
|
||||
list_del(&obj->userfault_link);
|
||||
mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
|
||||
obj->userfault_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static struct i915_mmap_offset *
|
||||
|
|
|
@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
|
|||
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
|
||||
|
||||
void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
|
|||
{
|
||||
/* Skip serialisation and waking the device if known to be not used. */
|
||||
|
||||
if (obj->userfault_count)
|
||||
if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
|
||||
i915_gem_object_release_mmap_gtt(obj);
|
||||
|
||||
if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
|
||||
|
|
|
@ -298,7 +298,8 @@ struct drm_i915_gem_object {
|
|||
};
|
||||
|
||||
/**
|
||||
* Whether the object is currently in the GGTT mmap.
|
||||
* Whether the object is currently in the GGTT or any other supported
|
||||
* fake offset mmap backed by lmem.
|
||||
*/
|
||||
unsigned int userfault_count;
|
||||
struct list_head userfault_link;
|
||||
|
|
|
@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
|
|||
{
|
||||
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
|
||||
|
||||
intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
|
||||
intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
|
||||
flush_workqueue(i915->wq);
|
||||
|
||||
/*
|
||||
|
|
|
@ -430,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
|
|||
reserved_base = stolen_top;
|
||||
reserved_size = 0;
|
||||
|
||||
switch (GRAPHICS_VER(i915)) {
|
||||
case 2:
|
||||
case 3:
|
||||
break;
|
||||
case 4:
|
||||
if (!IS_G4X(i915))
|
||||
break;
|
||||
fallthrough;
|
||||
case 5:
|
||||
g4x_get_stolen_reserved(i915, uncore,
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
icl_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
break;
|
||||
case 6:
|
||||
gen6_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
break;
|
||||
case 7:
|
||||
if (IS_VALLEYVIEW(i915))
|
||||
vlv_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
else
|
||||
gen7_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
break;
|
||||
case 8:
|
||||
case 9:
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
if (IS_LP(i915))
|
||||
chv_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
else
|
||||
bdw_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
fallthrough;
|
||||
case 11:
|
||||
case 12:
|
||||
icl_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base,
|
||||
&reserved_size);
|
||||
break;
|
||||
} else if (GRAPHICS_VER(i915) >= 7) {
|
||||
if (IS_VALLEYVIEW(i915))
|
||||
vlv_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
else
|
||||
gen7_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
} else if (GRAPHICS_VER(i915) >= 6) {
|
||||
gen6_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
|
||||
g4x_get_stolen_reserved(i915, uncore,
|
||||
&reserved_base, &reserved_size);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
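The stolen-memory hunk above replaces the per-version switch with an if/else ladder that tests the newest platforms first ("Invert if-else ladders to check latest version first"). A small standalone sketch of the same ladder follows; the helper names are taken from the diff, but the dispatch is reduced to returning a string for illustration.

```c
/* Sketch of the "check the newest platform first" ladder from the hunk above. */
#include <stdio.h>

static const char *stolen_reserved_helper(int ver, int is_g4x)
{
	if (ver >= 11)
		return "icl_get_stolen_reserved";
	else if (ver >= 8)
		return "bdw/chv_get_stolen_reserved";
	else if (ver >= 7)
		return "gen7/vlv_get_stolen_reserved";
	else if (ver >= 6)
		return "gen6_get_stolen_reserved";
	else if (ver >= 5 || is_g4x)
		return "g4x_get_stolen_reserved";

	return "no reserved-region helper";	/* gen2/3 and pre-G4X gen4 */
}

int main(void)
{
	printf("ver 12 -> %s\n", stolen_reserved_helper(12, 0));
	printf("ver 9  -> %s\n", stolen_reserved_helper(9, 0));
	printf("ver 4  -> %s (G4X)\n", stolen_reserved_helper(4, 1));
	return 0;
}
```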
@ -509,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
|
|||
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
|
||||
intel_wakeref_t wakeref = 0;
|
||||
|
||||
if (likely(obj)) {
|
||||
/* ttm_bo_release() already has dma_resv_lock */
|
||||
if (i915_ttm_cpu_maps_iomem(bo->resource))
|
||||
wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
|
||||
|
||||
__i915_gem_object_pages_fini(obj);
|
||||
|
||||
if (wakeref)
|
||||
intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
|
||||
|
||||
i915_ttm_free_cached_io_rsgt(obj);
|
||||
}
|
||||
}
|
||||
|
@ -981,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
|
|||
struct ttm_buffer_object *bo = area->vm_private_data;
|
||||
struct drm_device *dev = bo->base.dev;
|
||||
struct drm_i915_gem_object *obj;
|
||||
intel_wakeref_t wakeref = 0;
|
||||
vm_fault_t ret;
|
||||
int idx;
|
||||
|
||||
|
@ -1002,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
|
|||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
if (i915_ttm_cpu_maps_iomem(bo->resource))
|
||||
wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
|
||||
|
||||
if (!i915_ttm_resource_mappable(bo->resource)) {
|
||||
int err = -ENODEV;
|
||||
int i;
|
||||
|
@ -1023,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
|
|||
if (err) {
|
||||
drm_dbg(dev, "Unable to make resource CPU accessible\n");
|
||||
dma_resv_unlock(bo->base.resv);
|
||||
return VM_FAULT_SIGBUS;
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
goto out_rpm;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1034,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
|
|||
} else {
|
||||
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
|
||||
}
|
||||
|
||||
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
|
||||
return ret;
|
||||
goto out_rpm;
|
||||
|
||||
/* ttm_bo_vm_reserve() already has dma_resv_lock */
|
||||
if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
|
||||
obj->userfault_count = 1;
|
||||
mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
|
||||
list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
|
||||
mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
|
||||
}
|
||||
|
||||
if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
|
||||
intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
|
||||
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
|
||||
|
||||
i915_ttm_adjust_lru(obj);
|
||||
|
||||
dma_resv_unlock(bo->base.resv);
|
||||
|
||||
out_rpm:
|
||||
if (wakeref)
|
||||
intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
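The vm_fault_ttm() changes above take a runtime-PM wakeref whenever the faulting object is iomem-backed and route every exit through the new out_rpm label so the reference is always released. A self-contained model of that control flow, using invented stand-in types rather than the real TTM/i915 interfaces:

```c
/* Model of the fault-handler flow: conditional runtime-PM get, one exit path. */
#include <stdbool.h>
#include <stdio.h>

struct dev_model { int rpm_refs; };

static int rpm_get(struct dev_model *d) { d->rpm_refs++; return 1; }
static void rpm_put(struct dev_model *d) { d->rpm_refs--; }

static int handle_fault(struct dev_model *dev, bool is_iomem, bool mappable)
{
	int wakeref = 0;
	int ret = 0;

	if (is_iomem)
		wakeref = rpm_get(dev);	/* keep the device awake while touching iomem */

	if (!mappable) {
		ret = -1;		/* stand-in for VM_FAULT_SIGBUS */
		goto out_rpm;
	}

	/* ... insert PTEs, track userfault state, arm the auto-suspend timeout ... */

out_rpm:
	if (wakeref)
		rpm_put(dev);
	return ret;
}

int main(void)
{
	struct dev_model dev = { 0 };

	printf("fault ok:  %d (refs now %d)\n", handle_fault(&dev, true, true), dev.rpm_refs);
	printf("fault err: %d (refs now %d)\n", handle_fault(&dev, true, false), dev.rpm_refs);
	return 0;
}
```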
@@ -165,10 +165,12 @@ static u32 preparser_disable(bool state)
 	return MI_ARB_CHECK | 1 << 8 | state;
 }

-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
 {
+	u32 gsi_offset = gt->uncore->gsi_offset;
+
 	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
-	*cs++ = i915_mmio_reg_offset(inv_reg);
+	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
 	*cs++ = AUX_INV;
 	*cs++ = MI_NOOP;

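gen12_emit_aux_table_inv() now receives the GT so it can add the uncore's GSI offset to the register address written into the LRI packet; on a standalone-media GT the same registers sit at a fixed extra offset (0x380000, per MTL_MEDIA_GSI_BASE later in this series). A hedged standalone sketch of that offsetting, with a placeholder opcode and an illustrative register offset:

```c
/* Sketch: shifting the LRI target register by a per-GT GSI offset. */
#include <stdint.h>
#include <stdio.h>

#define MI_LOAD_REGISTER_IMM_1	0x11000001u	/* placeholder opcode, not the real encoding */
#define AUX_INV			0x1u

static uint32_t *emit_aux_table_inv(uint32_t gsi_offset, uint32_t *cs, uint32_t reg)
{
	*cs++ = MI_LOAD_REGISTER_IMM_1;
	*cs++ = reg + gsi_offset;	/* register address as seen by this GT */
	*cs++ = AUX_INV;
	*cs++ = 0;			/* MI_NOOP padding */
	return cs;
}

int main(void)
{
	uint32_t batch[8];
	uint32_t *end;

	/* Primary GT: no offset.  Hypothetical media GT: registers shifted by 0x380000. */
	end = emit_aux_table_inv(0x000000, batch, 0x4208);
	end = emit_aux_table_inv(0x380000, end, 0x4208);

	printf("emitted %td dwords, media-GT reg = %#x\n", end - batch, batch[5]);
	return 0;
}
```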
@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
|
|||
|
||||
if (!HAS_FLAT_CCS(rq->engine->i915)) {
|
||||
/* hsdes: 1809175790 */
|
||||
cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
|
||||
cs = gen12_emit_aux_table_inv(rq->engine->gt,
|
||||
cs, GEN12_GFX_CCS_AUX_NV);
|
||||
}
|
||||
|
||||
*cs++ = preparser_disable(false);
|
||||
|
@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
|
|||
|
||||
if (aux_inv) { /* hsdes: 1809175790 */
|
||||
if (rq->engine->class == VIDEO_DECODE_CLASS)
|
||||
cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
|
||||
cs = gen12_emit_aux_table_inv(rq->engine->gt,
|
||||
cs, GEN12_VD0_AUX_NV);
|
||||
else
|
||||
cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
|
||||
cs = gen12_emit_aux_table_inv(rq->engine->gt,
|
||||
cs, GEN12_VE0_AUX_NV);
|
||||
}
|
||||
|
||||
if (mode & EMIT_INVALIDATE)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include "intel_gt_regs.h"
|
||||
#include "intel_gpu_commands.h"
|
||||
|
||||
struct intel_gt;
|
||||
struct i915_request;
|
||||
|
||||
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
|
||||
|
@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
|
|||
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
|
||||
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
|
||||
|
||||
u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
|
||||
u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
|
||||
|
||||
static inline u32 *
|
||||
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
|
||||
|
|
|
@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt,
|
|||
*/
|
||||
if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
|
||||
return false;
|
||||
else if (GRAPHICS_VER(i915) == 12)
|
||||
else if (MEDIA_VER(i915) >= 12)
|
||||
return (physical_vdbox % 2 == 0) ||
|
||||
!(BIT(physical_vdbox - 1) & vdbox_mask);
|
||||
else if (GRAPHICS_VER(i915) == 11)
|
||||
else if (MEDIA_VER(i915) == 11)
|
||||
return logical_vdbox % 2 == 0;
|
||||
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
return false;
|
||||
}
|
||||
|
||||
static void engine_mask_apply_media_fuses(struct intel_gt *gt)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
unsigned int logical_vdbox = 0;
|
||||
unsigned int i;
|
||||
u32 media_fuse, fuse1;
|
||||
u16 vdbox_mask;
|
||||
u16 vebox_mask;
|
||||
|
||||
if (MEDIA_VER(gt->i915) < 11)
|
||||
return;
|
||||
|
||||
/*
|
||||
* On newer platforms the fusing register is called 'enable' and has
|
||||
* enable semantics, while on older platforms it is called 'disable'
|
||||
* and bits have disable semantics.
|
||||
*/
|
||||
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
|
||||
if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
|
||||
media_fuse = ~media_fuse;
|
||||
|
||||
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
|
||||
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
|
||||
GEN11_GT_VEBOX_DISABLE_SHIFT;
|
||||
|
||||
if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
|
||||
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
|
||||
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
|
||||
} else {
|
||||
gt->info.sfc_mask = ~0;
|
||||
}
|
||||
|
||||
for (i = 0; i < I915_MAX_VCS; i++) {
|
||||
if (!HAS_ENGINE(gt, _VCS(i))) {
|
||||
vdbox_mask &= ~BIT(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(BIT(i) & vdbox_mask)) {
|
||||
gt->info.engine_mask &= ~BIT(_VCS(i));
|
||||
drm_dbg(&i915->drm, "vcs%u fused off\n", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
|
||||
gt->info.vdbox_sfc_access |= BIT(i);
|
||||
logical_vdbox++;
|
||||
}
|
||||
drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
|
||||
vdbox_mask, VDBOX_MASK(gt));
|
||||
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
|
||||
|
||||
for (i = 0; i < I915_MAX_VECS; i++) {
|
||||
if (!HAS_ENGINE(gt, _VECS(i))) {
|
||||
vebox_mask &= ~BIT(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(BIT(i) & vebox_mask)) {
|
||||
gt->info.engine_mask &= ~BIT(_VECS(i));
|
||||
drm_dbg(&i915->drm, "vecs%u fused off\n", i);
|
||||
}
|
||||
}
|
||||
drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
|
||||
vebox_mask, VEBOX_MASK(gt));
|
||||
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
|
||||
}
|
||||
|
||||
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
|
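engine_mask_apply_media_fuses() above reads GEN11_GT_VEBOX_VDBOX_DISABLE, whose bits carry disable semantics before media version 12.50 and enable semantics from 12.50 onwards, so older platforms invert the raw value before masking. A tiny standalone illustration of that inversion; the mask width and sample values are made up:

```c
/* Sketch: one fuse register, two polarities depending on hardware version. */
#include <stdint.h>
#include <stdio.h>

#define VDBOX_MASK_BITS 0xff

static uint8_t vdbox_enable_mask(uint32_t fuse, int media_ver_is_new)
{
	/* On older parts the fuse lists *disabled* engines, so invert it first. */
	if (!media_ver_is_new)
		fuse = ~fuse;

	return fuse & VDBOX_MASK_BITS;
}

int main(void)
{
	/* The same raw fuse value means opposite things on the two generations. */
	printf("old semantics: %#x\n", vdbox_enable_mask(0x03, 0)); /* engines 0-1 fused off */
	printf("new semantics: %#x\n", vdbox_enable_mask(0x03, 1)); /* only engines 0-1 enabled */
	return 0;
}
```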
@ -672,6 +739,9 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
|
|||
unsigned long ccs_mask;
|
||||
unsigned int i;
|
||||
|
||||
if (GRAPHICS_VER(i915) < 11)
|
||||
return;
|
||||
|
||||
if (hweight32(CCS_MASK(gt)) <= 1)
|
||||
return;
|
||||
|
||||
|
@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
|
|||
unsigned long meml3_mask;
|
||||
unsigned long quad;
|
||||
|
||||
if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
|
||||
GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
|
||||
return;
|
||||
|
||||
meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
|
||||
meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
|
||||
|
||||
|
@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
|
|||
*/
|
||||
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
struct intel_gt_info *info = >->info;
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
unsigned int logical_vdbox = 0;
|
||||
unsigned int i;
|
||||
u32 media_fuse, fuse1;
|
||||
u16 vdbox_mask;
|
||||
u16 vebox_mask;
|
||||
|
||||
info->engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
|
||||
|
||||
if (GRAPHICS_VER(i915) < 11)
|
||||
return info->engine_mask;
|
||||
|
||||
/*
|
||||
* On newer platforms the fusing register is called 'enable' and has
|
||||
* enable semantics, while on older platforms it is called 'disable'
|
||||
* and bits have disable semantics.
|
||||
*/
|
||||
media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
|
||||
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
|
||||
media_fuse = ~media_fuse;
|
||||
|
||||
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
|
||||
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
|
||||
GEN11_GT_VEBOX_DISABLE_SHIFT;
|
||||
|
||||
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
|
||||
fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
|
||||
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
|
||||
} else {
|
||||
gt->info.sfc_mask = ~0;
|
||||
}
|
||||
|
||||
for (i = 0; i < I915_MAX_VCS; i++) {
|
||||
if (!HAS_ENGINE(gt, _VCS(i))) {
|
||||
vdbox_mask &= ~BIT(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(BIT(i) & vdbox_mask)) {
|
||||
info->engine_mask &= ~BIT(_VCS(i));
|
||||
drm_dbg(&i915->drm, "vcs%u fused off\n", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
|
||||
gt->info.vdbox_sfc_access |= BIT(i);
|
||||
logical_vdbox++;
|
||||
}
|
||||
drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
|
||||
vdbox_mask, VDBOX_MASK(gt));
|
||||
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
|
||||
|
||||
for (i = 0; i < I915_MAX_VECS; i++) {
|
||||
if (!HAS_ENGINE(gt, _VECS(i))) {
|
||||
vebox_mask &= ~BIT(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(BIT(i) & vebox_mask)) {
|
||||
info->engine_mask &= ~BIT(_VECS(i));
|
||||
drm_dbg(&i915->drm, "vecs%u fused off\n", i);
|
||||
}
|
||||
}
|
||||
drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
|
||||
vebox_mask, VEBOX_MASK(gt));
|
||||
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
|
||||
GEM_BUG_ON(!info->engine_mask);
|
||||
|
||||
engine_mask_apply_media_fuses(gt);
|
||||
engine_mask_apply_compute_fuses(gt);
|
||||
engine_mask_apply_copy_fuses(gt);
|
||||
|
||||
|
@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine)
|
|||
return false;
|
||||
|
||||
/* Caller disables interrupts */
|
||||
spin_lock(&engine->gt->irq_lock);
|
||||
spin_lock(engine->gt->irq_lock);
|
||||
engine->irq_enable(engine);
|
||||
spin_unlock(&engine->gt->irq_lock);
|
||||
spin_unlock(engine->gt->irq_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine)
|
|||
return;
|
||||
|
||||
/* Caller disables interrupts */
|
||||
spin_lock(&engine->gt->irq_lock);
|
||||
spin_lock(engine->gt->irq_lock);
|
||||
engine->irq_disable(engine);
|
||||
spin_unlock(&engine->gt->irq_lock);
|
||||
spin_unlock(engine->gt->irq_lock);
|
||||
}
|
||||
|
||||
void intel_engines_reset_default_submission(struct intel_gt *gt)
|
||||
|
|
|
@@ -110,6 +110,7 @@
 #define RING_SBBSTATE(base)			_MMIO((base) + 0x118) /* hsw+ */
 #define RING_SBBADDR_UDW(base)			_MMIO((base) + 0x11c) /* gen8+ */
 #define RING_BBADDR(base)			_MMIO((base) + 0x140)
+#define RING_BB_OFFSET(base)			_MMIO((base) + 0x158)
 #define RING_BBADDR_UDW(base)			_MMIO((base) + 0x168) /* gen8+ */
 #define CCID(base)				_MMIO((base) + 0x180)
 #define CCID_EN					BIT(0)

@@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)

 	INIT_LIST_HEAD(&ggtt->fence_list);
 	INIT_LIST_HEAD(&ggtt->userfault_list);
-	intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);

 	detect_bit_6_swizzle(ggtt);

|
|
|
@ -7,6 +7,7 @@
|
|||
#include <linux/mei_aux.h>
|
||||
#include "i915_drv.h"
|
||||
#include "i915_reg.h"
|
||||
#include "gem/i915_gem_region.h"
|
||||
#include "gt/intel_gsc.h"
|
||||
#include "gt/intel_gt.h"
|
||||
|
||||
|
@ -36,10 +37,56 @@ static int gsc_irq_init(int irq)
|
|||
return irq_set_chip_data(irq, NULL);
|
||||
}
|
||||
|
||||
static int
|
||||
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
|
||||
{
|
||||
struct intel_gt *gt = gsc_to_gt(gsc);
|
||||
struct drm_i915_gem_object *obj;
|
||||
int err;
|
||||
|
||||
obj = i915_gem_object_create_lmem(gt->i915, size,
|
||||
I915_BO_ALLOC_CONTIGUOUS |
|
||||
I915_BO_ALLOC_CPU_CLEAR);
|
||||
if (IS_ERR(obj)) {
|
||||
drm_err(>->i915->drm, "Failed to allocate gsc memory\n");
|
||||
return PTR_ERR(obj);
|
||||
}
|
||||
|
||||
err = i915_gem_object_pin_pages_unlocked(obj);
|
||||
if (err) {
|
||||
drm_err(>->i915->drm, "Failed to pin pages for gsc memory\n");
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
intf->gem_obj = obj;
|
||||
|
||||
return 0;
|
||||
|
||||
out_put:
|
||||
i915_gem_object_put(obj);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
|
||||
|
||||
if (!obj)
|
||||
return;
|
||||
|
||||
if (i915_gem_object_has_pinned_pages(obj))
|
||||
i915_gem_object_unpin_pages(obj);
|
||||
|
||||
i915_gem_object_put(obj);
|
||||
}
|
||||
|
||||
struct gsc_def {
|
||||
const char *name;
|
||||
unsigned long bar;
|
||||
size_t bar_size;
|
||||
bool use_polling;
|
||||
bool slow_firmware;
|
||||
size_t lmem_size;
|
||||
};
|
||||
|
||||
/* gsc resources and definitions (HECI1 and HECI2) */
|
||||
|
@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = {
|
|||
}
|
||||
};
|
||||
|
||||
static const struct gsc_def gsc_def_xehpsdv[] = {
|
||||
{
|
||||
/* HECI1 not enabled on the device. */
|
||||
},
|
||||
{
|
||||
.name = "mei-gscfi",
|
||||
.bar = DG1_GSC_HECI2_BASE,
|
||||
.bar_size = GSC_BAR_LENGTH,
|
||||
.use_polling = true,
|
||||
.slow_firmware = true,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct gsc_def gsc_def_dg2[] = {
|
||||
{
|
||||
.name = "mei-gsc",
|
||||
.bar = DG2_GSC_HECI1_BASE,
|
||||
.bar_size = GSC_BAR_LENGTH,
|
||||
.lmem_size = SZ_4M,
|
||||
},
|
||||
{
|
||||
.name = "mei-gscfi",
|
||||
|
@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev)
|
|||
kfree(adev);
|
||||
}
|
||||
|
||||
static void gsc_destroy_one(struct intel_gsc_intf *intf)
|
||||
static void gsc_destroy_one(struct drm_i915_private *i915,
|
||||
struct intel_gsc *gsc, unsigned int intf_id)
|
||||
{
|
||||
struct intel_gsc_intf *intf = &gsc->intf[intf_id];
|
||||
|
||||
if (intf->adev) {
|
||||
auxiliary_device_delete(&intf->adev->aux_dev);
|
||||
auxiliary_device_uninit(&intf->adev->aux_dev);
|
||||
intf->adev = NULL;
|
||||
}
|
||||
|
||||
if (intf->irq >= 0)
|
||||
irq_free_desc(intf->irq);
|
||||
intf->irq = -1;
|
||||
|
||||
gsc_ext_om_destroy(intf);
|
||||
}
|
||||
|
||||
static void gsc_init_one(struct drm_i915_private *i915,
|
||||
struct intel_gsc_intf *intf,
|
||||
static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
|
||||
unsigned int intf_id)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
struct mei_aux_device *adev;
|
||||
struct auxiliary_device *aux_dev;
|
||||
const struct gsc_def *def;
|
||||
struct intel_gsc_intf *intf = &gsc->intf[intf_id];
|
||||
int ret;
|
||||
|
||||
intf->irq = -1;
|
||||
|
@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915,
|
|||
|
||||
if (IS_DG1(i915)) {
|
||||
def = &gsc_def_dg1[intf_id];
|
||||
} else if (IS_XEHPSDV(i915)) {
|
||||
def = &gsc_def_xehpsdv[intf_id];
|
||||
} else if (IS_DG2(i915)) {
|
||||
def = &gsc_def_dg2[intf_id];
|
||||
} else {
|
||||
|
@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915,
|
|||
return;
|
||||
}
|
||||
|
||||
/* skip irq initialization */
|
||||
if (def->use_polling)
|
||||
goto add_device;
|
||||
|
||||
intf->irq = irq_alloc_desc(0);
|
||||
if (intf->irq < 0) {
|
||||
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
|
||||
return;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = gsc_irq_init(intf->irq);
|
||||
|
@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
add_device:
|
||||
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
|
||||
if (!adev)
|
||||
goto fail;
|
||||
|
||||
if (def->lmem_size) {
|
||||
drm_dbg(&i915->drm, "setting up GSC lmem\n");
|
||||
|
||||
if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
|
||||
drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
|
||||
kfree(adev);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
|
||||
adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
|
||||
}
|
||||
|
||||
adev->irq = intf->irq;
|
||||
adev->bar.parent = &pdev->resource[0];
|
||||
adev->bar.start = def->bar + pdev->resource[0].start;
|
||||
adev->bar.end = adev->bar.start + def->bar_size - 1;
|
||||
adev->bar.flags = IORESOURCE_MEM;
|
||||
adev->bar.desc = IORES_DESC_NONE;
|
||||
adev->slow_firmware = def->slow_firmware;
|
||||
|
||||
aux_dev = &adev->aux_dev;
|
||||
aux_dev->name = def->name;
|
||||
|
@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915,
|
|||
|
||||
return;
|
||||
fail:
|
||||
gsc_destroy_one(intf);
|
||||
gsc_destroy_one(i915, gsc, intf->id);
|
||||
}
|
||||
|
||||
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
|
||||
|
@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
|
|||
return;
|
||||
}
|
||||
|
||||
if (gt->gsc.intf[intf_id].irq < 0) {
|
||||
drm_err_ratelimited(>->i915->drm, "GSC irq: irq not set");
|
||||
if (gt->gsc.intf[intf_id].irq < 0)
|
||||
return;
|
||||
}
|
||||
|
||||
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
|
||||
if (ret)
|
||||
|
@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
|
|||
return;
|
||||
|
||||
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
|
||||
gsc_init_one(i915, &gsc->intf[i], i);
|
||||
gsc_init_one(i915, gsc, i);
|
||||
}
|
||||
|
||||
void intel_gsc_fini(struct intel_gsc *gsc)
|
||||
|
@ -220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc)
|
|||
return;
|
||||
|
||||
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
|
||||
gsc_destroy_one(&gsc->intf[i]);
|
||||
gsc_destroy_one(gt->i915, gsc, i);
|
||||
}
|
||||
|
|
|
@ -20,11 +20,14 @@ struct mei_aux_device;
|
|||
|
||||
/**
|
||||
* struct intel_gsc - graphics security controller
|
||||
*
|
||||
* @gem_obj: scratch memory GSC operations
|
||||
* @intf : gsc interface
|
||||
*/
|
||||
struct intel_gsc {
|
||||
struct intel_gsc_intf {
|
||||
struct mei_aux_device *adev;
|
||||
struct drm_i915_gem_object *gem_obj;
|
||||
int irq;
|
||||
unsigned int id;
|
||||
} intf[INTEL_GSC_NUM_INTERFACES];
|
||||
|
|
|
@ -31,14 +31,17 @@
|
|||
#include "intel_rc6.h"
|
||||
#include "intel_renderstate.h"
|
||||
#include "intel_rps.h"
|
||||
#include "intel_sa_media.h"
|
||||
#include "intel_gt_sysfs.h"
|
||||
#include "intel_uncore.h"
|
||||
#include "shmem_utils.h"
|
||||
|
||||
static void __intel_gt_init_early(struct intel_gt *gt)
|
||||
void intel_gt_common_init_early(struct intel_gt *gt)
|
||||
{
|
||||
spin_lock_init(>->irq_lock);
|
||||
spin_lock_init(gt->irq_lock);
|
||||
|
||||
INIT_LIST_HEAD(>->lmem_userfault_list);
|
||||
mutex_init(>->lmem_userfault_lock);
|
||||
INIT_LIST_HEAD(>->closed_vma);
|
||||
spin_lock_init(>->closed_lock);
|
||||
|
||||
|
@ -58,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt)
|
|||
}
|
||||
|
||||
/* Preliminary initialization of Tile 0 */
|
||||
void intel_root_gt_init_early(struct drm_i915_private *i915)
|
||||
int intel_root_gt_init_early(struct drm_i915_private *i915)
|
||||
{
|
||||
struct intel_gt *gt = to_gt(i915);
|
||||
|
||||
gt->i915 = i915;
|
||||
gt->uncore = &i915->uncore;
|
||||
gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
|
||||
if (!gt->irq_lock)
|
||||
return -ENOMEM;
|
||||
|
||||
__intel_gt_init_early(gt);
|
||||
intel_gt_common_init_early(gt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_gt_probe_lmem(struct intel_gt *gt)
|
||||
|
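With standalone media, the GT's irq_lock becomes a pointer: the root GT allocates it once through drmm_kzalloc() and the media GT aliases the same lock, so both tiles serialise their interrupt paths against each other. Below is a userspace model of that sharing, with pthread mutexes and plain malloc() standing in for spinlock_t and the managed allocator.

```c
/* Model: two "GT" structs sharing one interrupt lock via a pointer. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct gt_model {
	const char *name;
	pthread_mutex_t *irq_lock;	/* shared, no longer embedded */
};

static int root_gt_init(struct gt_model *gt)
{
	gt->irq_lock = malloc(sizeof(*gt->irq_lock));	/* drmm_kzalloc() in the driver */
	if (!gt->irq_lock)
		return -1;
	pthread_mutex_init(gt->irq_lock, NULL);
	return 0;
}

static void media_gt_init(struct gt_model *media, const struct gt_model *root)
{
	media->irq_lock = root->irq_lock;	/* alias the primary GT's lock */
}

int main(void)
{
	struct gt_model root = { .name = "root" }, media = { .name = "media" };

	if (root_gt_init(&root))
		return 1;
	media_gt_init(&media, &root);

	pthread_mutex_lock(media.irq_lock);	/* also excludes the root GT's IRQ path */
	printf("%s and %s share one irq lock: %s\n", root.name, media.name,
	       root.irq_lock == media.irq_lock ? "yes" : "no");
	pthread_mutex_unlock(media.irq_lock);

	free(root.irq_lock);
	return 0;
}
```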
@ -781,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
|
|||
int ret;
|
||||
|
||||
if (!gt_is_root(gt)) {
|
||||
struct intel_uncore_mmio_debug *mmio_debug;
|
||||
struct intel_uncore *uncore;
|
||||
spinlock_t *irq_lock;
|
||||
|
||||
uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
|
||||
uncore = drmm_kzalloc(>->i915->drm, sizeof(*uncore), GFP_KERNEL);
|
||||
if (!uncore)
|
||||
return -ENOMEM;
|
||||
|
||||
mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
|
||||
if (!mmio_debug) {
|
||||
kfree(uncore);
|
||||
irq_lock = drmm_kzalloc(>->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
|
||||
if (!irq_lock)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
gt->uncore = uncore;
|
||||
gt->uncore->debug = mmio_debug;
|
||||
gt->irq_lock = irq_lock;
|
||||
|
||||
__intel_gt_init_early(gt);
|
||||
intel_gt_common_init_early(gt);
|
||||
}
|
||||
|
||||
intel_uncore_init_early(gt->uncore, gt);
|
||||
intel_wakeref_auto_init(>->userfault_wakeref, gt->uncore->rpm);
|
||||
|
||||
ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
|
||||
if (ret)
|
||||
|
@ -811,24 +818,14 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_gt_tile_cleanup(struct intel_gt *gt)
|
||||
{
|
||||
intel_uncore_cleanup_mmio(gt->uncore);
|
||||
|
||||
if (!gt_is_root(gt)) {
|
||||
kfree(gt->uncore->debug);
|
||||
kfree(gt->uncore);
|
||||
kfree(gt);
|
||||
}
|
||||
}
|
||||
|
||||
int intel_gt_probe_all(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
struct intel_gt *gt = &i915->gt0;
|
||||
const struct intel_gt_definition *gtdef;
|
||||
phys_addr_t phys_addr;
|
||||
unsigned int mmio_bar;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
|
||||
|
@ -839,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
|
|||
* and it has been already initialized early during probe
|
||||
* in i915_driver_probe()
|
||||
*/
|
||||
gt->i915 = i915;
|
||||
gt->name = "Primary GT";
|
||||
gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
|
||||
|
||||
drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
|
||||
ret = intel_gt_tile_setup(gt, phys_addr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915->gt[0] = gt;
|
||||
|
||||
/* TODO: add more tiles */
|
||||
if (!HAS_EXTRA_GT_LIST(i915))
|
||||
return 0;
|
||||
|
||||
for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
|
||||
gtdef->name != NULL;
|
||||
i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
|
||||
gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
|
||||
if (!gt) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
gt->i915 = i915;
|
||||
gt->name = gtdef->name;
|
||||
gt->type = gtdef->type;
|
||||
gt->info.engine_mask = gtdef->engine_mask;
|
||||
gt->info.id = i;
|
||||
|
||||
drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
|
||||
if (GEM_WARN_ON(range_overflows_t(resource_size_t,
|
||||
gtdef->mapping_base,
|
||||
SZ_16M,
|
||||
pci_resource_len(pdev, mmio_bar)))) {
|
||||
ret = -ENODEV;
|
||||
goto err;
|
||||
}
|
||||
|
||||
switch (gtdef->type) {
|
||||
case GT_TILE:
|
||||
ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
|
||||
break;
|
||||
|
||||
case GT_MEDIA:
|
||||
ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
|
||||
gtdef->gsi_offset);
|
||||
break;
|
||||
|
||||
case GT_PRIMARY:
|
||||
/* Primary GT should not appear in extra GT list */
|
||||
default:
|
||||
MISSING_CASE(gtdef->type);
|
||||
ret = -ENODEV;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
i915->gt[i] = gt;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
|
||||
intel_gt_release_all(i915);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int intel_gt_tiles_init(struct drm_i915_private *i915)
|
||||
|
@ -869,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915)
|
|||
struct intel_gt *gt;
|
||||
unsigned int id;
|
||||
|
||||
for_each_gt(gt, i915, id) {
|
||||
intel_gt_tile_cleanup(gt);
|
||||
for_each_gt(gt, i915, id)
|
||||
i915->gt[id] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void intel_gt_info_print(const struct intel_gt_info *info,
|
||||
|
|
|
@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
|
|||
return container_of(gsc, struct intel_gt, gsc);
|
||||
}
|
||||
|
||||
void intel_root_gt_init_early(struct drm_i915_private *i915);
|
||||
void intel_gt_common_init_early(struct intel_gt *gt);
|
||||
int intel_root_gt_init_early(struct drm_i915_private *i915);
|
||||
int intel_gt_assign_ggtt(struct intel_gt *gt);
|
||||
int intel_gt_init_mmio(struct intel_gt *gt);
|
||||
int __must_check intel_gt_init_hw(struct intel_gt *gt);
|
||||
|
@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt);
|
|||
void intel_gt_driver_unregister(struct intel_gt *gt);
|
||||
void intel_gt_driver_remove(struct intel_gt *gt);
|
||||
void intel_gt_driver_release(struct intel_gt *gt);
|
||||
|
||||
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
|
||||
|
||||
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
|
||||
|
|
|
@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore)
|
|||
return base_freq + frac_freq;
|
||||
}
|
||||
|
||||
static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
|
||||
u32 rpm_config_reg)
|
||||
{
|
||||
u32 f19_2_mhz = 19200000;
|
||||
u32 f24_mhz = 24000000;
|
||||
u32 crystal_clock =
|
||||
(rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
|
||||
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
|
||||
|
||||
switch (crystal_clock) {
|
||||
case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
|
||||
return f19_2_mhz;
|
||||
case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
|
||||
return f24_mhz;
|
||||
default:
|
||||
MISSING_CASE(crystal_clock);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
|
||||
u32 rpm_config_reg)
|
||||
{
|
||||
|
@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
|
|||
}
|
||||
}
|
||||
|
||||
static u32 read_clock_frequency(struct intel_uncore *uncore)
|
||||
static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
|
||||
{
|
||||
u32 f12_5_mhz = 12500000;
|
||||
u32 f19_2_mhz = 19200000;
|
||||
u32 f24_mhz = 24000000;
|
||||
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
|
||||
u32 freq = 0;
|
||||
|
||||
if (GRAPHICS_VER(uncore->i915) <= 4) {
|
||||
/*
|
||||
* PRMs say:
|
||||
*
|
||||
* "The value in this register increments once every 16
|
||||
* hclks." (through the “Clocking Configuration”
|
||||
* (“CLKCFG”) MCHBAR register)
|
||||
*/
|
||||
return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
|
||||
} else if (GRAPHICS_VER(uncore->i915) <= 8) {
|
||||
/*
|
||||
* PRMs say:
|
||||
*
|
||||
* "The PCU TSC counts 10ns increments; this timestamp
|
||||
* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
|
||||
* rolling over every 1.5 hours).
|
||||
*/
|
||||
return f12_5_mhz;
|
||||
} else if (GRAPHICS_VER(uncore->i915) <= 9) {
|
||||
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
|
||||
u32 freq = 0;
|
||||
/*
|
||||
* Note that on gen11+, the clock frequency may be reconfigured.
|
||||
* We do not, and we assume nobody else does.
|
||||
*
|
||||
* First figure out the reference frequency. There are 2 ways
|
||||
* we can compute the frequency, either through the
|
||||
* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
|
||||
* tells us which one we should use.
|
||||
*/
|
||||
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
|
||||
freq = read_reference_ts_freq(uncore);
|
||||
} else {
|
||||
u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
|
||||
|
||||
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
|
||||
freq = read_reference_ts_freq(uncore);
|
||||
} else {
|
||||
freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
|
||||
|
||||
/*
|
||||
* Now figure out how the command stream's timestamp
|
||||
* register increments from this frequency (it might
|
||||
* increment only every few clock cycle).
|
||||
*/
|
||||
freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
|
||||
CTC_SHIFT_PARAMETER_SHIFT);
|
||||
}
|
||||
|
||||
return freq;
|
||||
} else if (GRAPHICS_VER(uncore->i915) <= 12) {
|
||||
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
|
||||
u32 freq = 0;
|
||||
freq = gen11_get_crystal_clock_freq(uncore, c0);
|
||||
|
||||
/*
|
||||
* First figure out the reference frequency. There are 2 ways
|
||||
* we can compute the frequency, either through the
|
||||
* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
|
||||
* tells us which one we should use.
|
||||
* Now figure out how the command stream's timestamp
|
||||
* register increments from this frequency (it might
|
||||
* increment only every few clock cycle).
|
||||
*/
|
||||
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
|
||||
freq = read_reference_ts_freq(uncore);
|
||||
} else {
|
||||
u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
|
||||
|
||||
if (GRAPHICS_VER(uncore->i915) >= 11)
|
||||
freq = gen11_get_crystal_clock_freq(uncore, c0);
|
||||
else
|
||||
freq = gen9_get_crystal_clock_freq(uncore, c0);
|
||||
|
||||
/*
|
||||
* Now figure out how the command stream's timestamp
|
||||
* register increments from this frequency (it might
|
||||
* increment only every few clock cycle).
|
||||
*/
|
||||
freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
|
||||
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
|
||||
}
|
||||
|
||||
return freq;
|
||||
freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
|
||||
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
|
||||
}
|
||||
|
||||
MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
|
||||
return 0;
|
||||
return freq;
|
||||
}
|
||||
|
||||
static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
|
||||
{
|
||||
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
|
||||
u32 freq = 0;
|
||||
|
||||
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
|
||||
freq = read_reference_ts_freq(uncore);
|
||||
} else {
|
||||
freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
|
||||
|
||||
/*
|
||||
* Now figure out how the command stream's timestamp
|
||||
* register increments from this frequency (it might
|
||||
* increment only every few clock cycle).
|
||||
*/
|
||||
freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
|
||||
CTC_SHIFT_PARAMETER_SHIFT);
|
||||
}
|
||||
|
||||
return freq;
|
||||
}
|
||||
|
||||
static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
|
||||
{
|
||||
/*
|
||||
* PRMs say:
|
||||
*
|
||||
* "The PCU TSC counts 10ns increments; this timestamp
|
||||
* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
|
||||
* rolling over every 1.5 hours).
|
||||
*/
|
||||
return 12500000;
|
||||
}
|
||||
|
||||
static u32 gen2_read_clock_frequency(struct intel_uncore *uncore)
|
||||
{
|
||||
/*
|
||||
* PRMs say:
|
||||
*
|
||||
* "The value in this register increments once every 16
|
||||
* hclks." (through the “Clocking Configuration”
|
||||
* (“CLKCFG”) MCHBAR register)
|
||||
*/
|
||||
return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
|
||||
}
|
||||
|
||||
static u32 read_clock_frequency(struct intel_uncore *uncore)
|
||||
{
|
||||
if (GRAPHICS_VER(uncore->i915) >= 11)
|
||||
return gen11_read_clock_frequency(uncore);
|
||||
else if (GRAPHICS_VER(uncore->i915) >= 9)
|
||||
return gen9_read_clock_frequency(uncore);
|
||||
else if (GRAPHICS_VER(uncore->i915) >= 5)
|
||||
return gen5_read_clock_frequency(uncore);
|
||||
else
|
||||
return gen2_read_clock_frequency(uncore);
|
||||
}
|
||||
|
||||
void intel_gt_init_clock_frequency(struct intel_gt *gt)
|
||||
{
|
||||
/*
|
||||
* Note that on gen11+, the clock frequency may be reconfigured.
|
||||
* We do not, and we assume nobody else does.
|
||||
*/
|
||||
gt->clock_frequency = read_clock_frequency(gt->uncore);
|
||||
if (gt->clock_frequency)
|
||||
gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
|
||||
|
||||
/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
|
||||
if (GRAPHICS_VER(gt->i915) == 11)
|
||||
gt->clock_period_ns = NSEC_PER_SEC / 13750000;
|
||||
else if (gt->clock_frequency)
|
||||
gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
|
||||
|
||||
GT_TRACE(gt,
|
||||
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
|
||||
|
|
|
@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt,
|
|||
u32 timeout_ts;
|
||||
u32 ident;
|
||||
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
|
||||
|
||||
|
@ -59,11 +59,17 @@ static void
|
|||
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
|
||||
const u16 iir)
|
||||
{
|
||||
struct intel_gt *media_gt = gt->i915->media_gt;
|
||||
|
||||
if (instance == OTHER_GUC_INSTANCE)
|
||||
return guc_irq_handler(>->uc.guc, iir);
|
||||
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
|
||||
return guc_irq_handler(&media_gt->uc.guc, iir);
|
||||
|
||||
if (instance == OTHER_GTPM_INSTANCE)
|
||||
return gen11_rps_irq_handler(>->rps, iir);
|
||||
if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
|
||||
return gen11_rps_irq_handler(&media_gt->rps, iir);
|
||||
|
||||
if (instance == OTHER_KCR_INSTANCE)
|
||||
return intel_pxp_irq_handler(>->pxp, iir);
|
||||
|
@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
|
|||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
/*
|
||||
* Platforms with standalone media have their media engines in another
|
||||
* GT.
|
||||
*/
|
||||
if (MEDIA_VER(gt->i915) >= 13 &&
|
||||
(class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) {
|
||||
if (!gt->i915->media_gt)
|
||||
goto err;
|
||||
|
||||
gt = gt->i915->media_gt;
|
||||
}
|
||||
|
||||
if (instance <= MAX_ENGINE_INSTANCE)
|
||||
engine = gt->engine_class[class][instance];
|
||||
else
|
||||
|
@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
|
|||
if (likely(engine))
|
||||
return intel_engine_cs_irq(engine, iir);
|
||||
|
||||
err:
|
||||
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
|
||||
class, instance);
|
||||
}
|
||||
|
@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
|
|||
unsigned long intr_dw;
|
||||
unsigned int bit;
|
||||
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
|
||||
|
||||
|
@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
|
|||
{
|
||||
unsigned int bank;
|
||||
|
||||
spin_lock(>->irq_lock);
|
||||
spin_lock(gt->irq_lock);
|
||||
|
||||
for (bank = 0; bank < 2; bank++) {
|
||||
if (master_ctl & GEN11_GT_DW_IRQ(bank))
|
||||
gen11_gt_bank_handler(gt, bank);
|
||||
}
|
||||
|
||||
spin_unlock(>->irq_lock);
|
||||
spin_unlock(gt->irq_lock);
|
||||
}
|
||||
|
||||
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
|
||||
|
@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt,
|
|||
void __iomem * const regs = gt->uncore->regs;
|
||||
u32 dw;
|
||||
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
|
||||
if (dw & BIT(bit)) {
|
||||
|
@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
|
|||
if (!HAS_L3_DPF(gt->i915))
|
||||
return;
|
||||
|
||||
spin_lock(>->irq_lock);
|
||||
spin_lock(gt->irq_lock);
|
||||
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
|
||||
spin_unlock(>->irq_lock);
|
||||
spin_unlock(gt->irq_lock);
|
||||
|
||||
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
|
||||
gt->i915->l3_parity.which_slice |= 1 << 1;
|
||||
|
@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt,
|
|||
u32 interrupt_mask,
|
||||
u32 enabled_irq_mask)
|
||||
{
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt,
|
|||
|
||||
WARN_ON(enabled_irq_mask & ~interrupt_mask);
|
||||
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
new_val = gt->pm_imr;
|
||||
new_val &= ~interrupt_mask;
|
||||
|
@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
|
|||
struct intel_uncore *uncore = gt->uncore;
|
||||
i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
|
||||
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
intel_uncore_write(uncore, reg, reset_mask);
|
||||
intel_uncore_write(uncore, reg, reset_mask);
|
||||
|
@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt)
|
|||
|
||||
void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
|
||||
{
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
gt->pm_ier |= enable_mask;
|
||||
write_pm_ier(gt);
|
||||
|
@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
|
|||
|
||||
void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
|
||||
{
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
gt->pm_ier &= ~disable_mask;
|
||||
gen6_gt_pm_mask_irq(gt, disable_mask);
|
||||
|
|
|
@ -1554,6 +1554,8 @@
|
|||
#define OTHER_GTPM_INSTANCE 1
|
||||
#define OTHER_KCR_INSTANCE 4
|
||||
#define OTHER_GSC_INSTANCE 6
|
||||
#define OTHER_MEDIA_GUC_INSTANCE 16
|
||||
#define OTHER_MEDIA_GTPM_INSTANCE 17
|
||||
|
||||
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
|
||||
|
||||
|
@ -1578,4 +1580,12 @@
|
|||
|
||||
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
|
||||
|
||||
/*
|
||||
* Standalone Media's non-engine GT registers are located at their regular GT
|
||||
* offsets plus 0x380000. This extra offset is stored inside the intel_uncore
|
||||
* structure so that the existing code can be used for both GTs without
|
||||
* modification.
|
||||
*/
|
||||
#define MTL_MEDIA_GSI_BASE 0x380000
|
||||
|
||||
#endif /* __INTEL_GT_REGS__ */
|
||||
|
|
|
@ -81,8 +81,17 @@ struct gt_defaults {
|
|||
u32 max_freq;
|
||||
};
|
||||
|
||||
enum intel_gt_type {
|
||||
GT_PRIMARY,
|
||||
GT_TILE,
|
||||
GT_MEDIA,
|
||||
};
|
||||
|
||||
struct intel_gt {
|
||||
struct drm_i915_private *i915;
|
||||
const char *name;
|
||||
enum intel_gt_type type;
|
||||
|
||||
struct intel_uncore *uncore;
|
||||
struct i915_ggtt *ggtt;
|
||||
|
||||
|
@ -132,6 +141,20 @@ struct intel_gt {
|
|||
struct intel_wakeref wakeref;
|
||||
atomic_t user_wakeref;
|
||||
|
||||
/**
|
||||
* Protects access to lmem usefault list.
|
||||
* It is required, if we are outside of the runtime suspend path,
|
||||
* access to @lmem_userfault_list requires always first grabbing the
|
||||
* runtime pm, to ensure we can't race against runtime suspend.
|
||||
* Once we have that we also need to grab @lmem_userfault_lock,
|
||||
* at which point we have exclusive access.
|
||||
* The runtime suspend path is special since it doesn't really hold any locks,
|
||||
* but instead has exclusive access by virtue of all other accesses requiring
|
||||
* holding the runtime pm wakeref.
|
||||
*/
|
||||
struct mutex lmem_userfault_lock;
|
||||
struct list_head lmem_userfault_list;
|
||||
|
||||
struct list_head closed_vma;
|
||||
spinlock_t closed_lock; /* guards the list of closed_vma */
|
||||
|
||||
|
@ -147,6 +170,9 @@ struct intel_gt {
|
|||
*/
|
||||
intel_wakeref_t awake;
|
||||
|
||||
/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
|
||||
struct intel_wakeref_auto userfault_wakeref;
|
||||
|
||||
u32 clock_frequency;
|
||||
u32 clock_period_ns;
|
||||
|
||||
|
@ -154,7 +180,7 @@ struct intel_gt {
|
|||
struct intel_rc6 rc6;
|
||||
struct intel_rps rps;
|
||||
|
||||
spinlock_t irq_lock;
|
||||
spinlock_t *irq_lock;
|
||||
u32 gt_imr;
|
||||
u32 pm_ier;
|
||||
u32 pm_imr;
|
||||
|
@ -262,6 +288,14 @@ struct intel_gt {
|
|||
struct kobject *sysfs_defaults;
|
||||
};
|
||||
|
||||
struct intel_gt_definition {
|
||||
enum intel_gt_type type;
|
||||
char *name;
|
||||
u32 mapping_base;
|
||||
u32 gsi_offset;
|
||||
intel_engine_mask_t engine_mask;
|
||||
};
|
||||
|
||||
enum intel_gt_scratch_field {
|
||||
/* 8 bytes */
|
||||
INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
|
||||
|
|
|
@ -386,9 +386,6 @@ struct i915_ggtt {
|
|||
*/
|
||||
struct list_head userfault_list;
|
||||
|
||||
/* Manual runtime pm autosuspend delay for user GGTT mmaps */
|
||||
struct intel_wakeref_auto userfault_wakeref;
|
||||
|
||||
struct mutex error_mutex;
|
||||
struct drm_mm_node error_capture;
|
||||
struct drm_mm_node uc_fw;
|
||||
|
|
|
@ -662,6 +662,21 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
|
||||
{
|
||||
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
|
||||
return 0x80;
|
||||
else if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return 0x70;
|
||||
else if (GRAPHICS_VER(engine->i915) >= 9)
|
||||
return 0x64;
|
||||
else if (GRAPHICS_VER(engine->i915) >= 8 &&
|
||||
engine->class == RENDER_CLASS)
|
||||
return 0xc4;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
|
||||
{
|
||||
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
|
||||
|
@ -768,6 +783,7 @@ static void init_common_regs(u32 * const regs,
|
|||
bool inhibit)
|
||||
{
|
||||
u32 ctl;
|
||||
int loc;
|
||||
|
||||
ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
|
||||
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
|
||||
|
@ -779,6 +795,10 @@ static void init_common_regs(u32 * const regs,
|
|||
regs[CTX_CONTEXT_CONTROL] = ctl;
|
||||
|
||||
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
|
||||
|
||||
loc = lrc_ring_bb_offset(engine);
|
||||
if (loc != -1)
|
||||
regs[loc + 1] = 0;
|
||||
}
|
||||
|
||||
static void init_wa_bb_regs(u32 * const regs,
|
||||
|
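init_common_regs() above now clears the BB_OFFSET slot whenever lrc_ring_bb_offset() reports that the platform has one, with -1 serving as the "not present" sentinel. A standalone sketch of that pattern; the dword indices follow the gen12/gen9/gen8 cases of the lookup in this series (the 12.50 case is omitted), and the context-image layout is only illustrative.

```c
/* Sketch: per-generation register-slot lookup with a -1 sentinel. */
#include <stdio.h>

static int ring_bb_offset_slot(int ver, int is_render)
{
	if (ver >= 12)
		return 0x70;
	else if (ver >= 9)
		return 0x64;
	else if (ver >= 8 && is_render)
		return 0xc4;
	else
		return -1;		/* register not present in the context image */
}

static void init_context_regs(unsigned int *regs, int ver, int is_render)
{
	int loc = ring_bb_offset_slot(ver, is_render);

	if (loc != -1)
		regs[loc + 1] = 0;	/* clear stale BB_OFFSET inherited from the image */
}

int main(void)
{
	unsigned int regs[0x100] = { [0x71] = 0xdeadbeef };

	init_context_regs(regs, 12, 0);
	printf("BB_OFFSET slot after init: %#x\n", regs[0x71]);
	return 0;
}
```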
@ -1278,7 +1298,8 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
|
|||
|
||||
/* hsdes: 1809175790 */
|
||||
if (!HAS_FLAT_CCS(ce->engine->i915))
|
||||
cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
|
||||
cs = gen12_emit_aux_table_inv(ce->engine->gt,
|
||||
cs, GEN12_GFX_CCS_AUX_NV);
|
||||
|
||||
/* Wa_16014892111 */
|
||||
if (IS_DG2(ce->engine->i915))
|
||||
|
@ -1304,9 +1325,11 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
|
|||
/* hsdes: 1809175790 */
|
||||
if (!HAS_FLAT_CCS(ce->engine->i915)) {
|
||||
if (ce->engine->class == VIDEO_DECODE_CLASS)
|
||||
cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
|
||||
cs = gen12_emit_aux_table_inv(ce->engine->gt,
|
||||
cs, GEN12_VD0_AUX_NV);
|
||||
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
|
||||
cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
|
||||
cs = gen12_emit_aux_table_inv(ce->engine->gt,
|
||||
cs, GEN12_VE0_AUX_NV);
|
||||
}
|
||||
|
||||
return cs;
|
||||
|
|
|
@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps)
|
|||
|
||||
rps_reset_ei(rps);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen6_gt_pm_enable_irq(gt, rps->pm_events);
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
|
||||
|
@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps)
|
|||
{
|
||||
struct intel_gt *gt = rps_to_gt(rps);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
if (GRAPHICS_VER(gt->i915) >= 11)
|
||||
gen11_rps_reset_interrupts(rps);
|
||||
else
|
||||
gen6_rps_reset_interrupts(rps);
|
||||
|
||||
rps->pm_iir = 0;
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
static void rps_disable_interrupts(struct intel_rps *rps)
|
||||
|
@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
|
|||
intel_uncore_write(gt->uncore,
|
||||
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
intel_synchronize_irq(gt->i915);
|
||||
|
||||
|
@ -1797,10 +1797,10 @@ static void rps_work(struct work_struct *work)
|
|||
int new_freq, adj, min, max;
|
||||
u32 pm_iir = 0;
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
|
||||
client_boost = atomic_read(&rps->num_waiters);
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
/* Make sure we didn't queue anything we're not going to process. */
|
||||
if (!pm_iir && !client_boost)
|
||||
|
@ -1873,9 +1873,9 @@ static void rps_work(struct work_struct *work)
|
|||
mutex_unlock(&rps->lock);
|
||||
|
||||
out:
|
||||
spin_lock_irq(>->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen6_gt_pm_unmask_irq(gt, rps->pm_events);
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
|
||||
|
@ -1883,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
|
|||
struct intel_gt *gt = rps_to_gt(rps);
|
||||
const u32 events = rps->pm_events & pm_iir;
|
||||
|
||||
lockdep_assert_held(>->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
if (unlikely(!events))
|
||||
return;
|
||||
|
@ -1903,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
|
|||
|
||||
events = pm_iir & rps->pm_events;
|
||||
if (events) {
|
||||
spin_lock(>->irq_lock);
|
||||
spin_lock(gt->irq_lock);
|
||||
|
||||
GT_TRACE(gt, "irq events:%x\n", events);
|
||||
|
||||
|
@ -1911,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
|
|||
rps->pm_iir |= events;
|
||||
|
||||
schedule_work(&rps->work);
|
||||
spin_unlock(>->irq_lock);
|
||||
spin_unlock(gt->irq_lock);
|
||||
}
|
||||
|
||||
if (GRAPHICS_VER(gt->i915) >= 8)
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2021 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <drm/drm_managed.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "gt/intel_gt.h"
|
||||
#include "gt/intel_sa_media.h"
|
||||
|
||||
int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
|
||||
u32 gsi_offset)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
struct intel_uncore *uncore;
|
||||
|
||||
uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL);
|
||||
if (!uncore)
|
||||
return -ENOMEM;
|
||||
|
||||
uncore->gsi_offset = gsi_offset;
|
||||
|
||||
gt->irq_lock = to_gt(i915)->irq_lock;
|
||||
intel_gt_common_init_early(gt);
|
||||
intel_uncore_init_early(uncore, gt);
|
||||
|
||||
/*
|
||||
* Standalone media shares the general MMIO space with the primary
|
||||
* GT. We'll re-use the primary GT's mapping.
|
||||
*/
|
||||
uncore->regs = i915->uncore.regs;
|
||||
if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
|
||||
return -EIO;
|
||||
|
||||
gt->uncore = uncore;
|
||||
gt->phys_addr = phys_addr;
|
||||
|
||||
/*
|
||||
* For current platforms we can assume there's only a single
|
||||
* media GT and cache it for quick lookup.
|
||||
*/
|
||||
drm_WARN_ON(&i915->drm, i915->media_gt);
|
||||
i915->media_gt = gt;
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2021 Intel Corporation
|
||||
*/
|
||||
#ifndef __INTEL_SA_MEDIA__
|
||||
#define __INTEL_SA_MEDIA__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct intel_gt;
|
||||
|
||||
int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
|
||||
u32 gsi_offset);
|
||||
|
||||
#endif /* __INTEL_SA_MEDIA_H__ */
|
|
@ -27,6 +27,9 @@
|
|||
#define NUM_GPR 16
|
||||
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
|
||||
|
||||
#define LRI_HEADER MI_INSTR(0x22, 0)
|
||||
#define LRI_LENGTH_MASK GENMASK(7, 0)
|
||||
|
||||
static struct i915_vma *create_scratch(struct intel_gt *gt)
|
||||
{
|
||||
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
|
||||
|
@ -202,7 +205,7 @@ static int live_lrc_layout(void *arg)
|
|||
continue;
|
||||
}
|
||||
|
||||
if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
|
||||
if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
|
||||
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
|
||||
engine->name, dw, lri);
|
||||
err = -EINVAL;
|
||||
|
@ -357,6 +360,11 @@ static int live_lrc_fixed(void *arg)
|
|||
lrc_ring_cmd_buf_cctl(engine),
|
||||
"RING_CMD_BUF_CCTL"
|
||||
},
|
||||
{
|
||||
i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
|
||||
lrc_ring_bb_offset(engine),
|
||||
"RING_BB_OFFSET"
|
||||
},
|
||||
{ },
|
||||
}, *t;
|
||||
u32 *hw;
|
||||
|
@ -987,18 +995,40 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
|
|||
hw = defaults;
|
||||
hw += LRC_STATE_OFFSET / sizeof(*hw);
|
||||
do {
|
||||
u32 len = hw[dw] & 0x7f;
|
||||
u32 len = hw[dw] & LRI_LENGTH_MASK;
|
||||
|
||||
/*
|
||||
* Keep it simple, skip parsing complex commands
|
||||
*
|
||||
* At present, there are no more MI_LOAD_REGISTER_IMM
|
||||
* commands after the first 3D state command. Rather
|
||||
* than include a table (see i915_cmd_parser.c) of all
|
||||
* the possible commands and their instruction lengths
|
||||
* (or mask for variable length instructions), assume
|
||||
* we have gathered the complete list of registers and
|
||||
* bail out.
|
||||
*/
|
||||
if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
|
||||
break;
|
||||
|
||||
if (hw[dw] == 0) {
|
||||
dw++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
|
||||
if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
|
||||
/* Assume all other MI commands match LRI length mask */
|
||||
dw += len + 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!len) {
|
||||
pr_err("%s: invalid LRI found in context image\n",
|
||||
ce->engine->name);
|
||||
igt_hexdump(defaults, PAGE_SIZE);
|
||||
break;
|
||||
}
|
||||
|
||||
dw++;
|
||||
len = (len + 1) / 2;
|
||||
while (len--) {
|
||||
|
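The store_context() changes above replace the magic 0x7f / MI_INSTR(0x22, 0) constants with LRI_LENGTH_MASK and LRI_HEADER and document why parsing stops at the first non-MI command: an MI_LOAD_REGISTER_IMM packet is a header dword (opcode 0x22 in bits 28:23, payload length in the low byte) followed by (register offset, value) pairs. A stripped-down, hypothetical walker showing the same loop shape the selftests use; everything beyond the masks visible in this diff is an assumption:

/* Illustrative LRI walk over a context image, mirroring the selftest loop. */
static void walk_lri(const u32 *hw, unsigned int dw_limit)
{
	unsigned int dw = 0;

	while (dw < dw_limit) {
		u32 len = hw[dw] & 0x7f;		/* LRI_LENGTH_MASK */

		if (hw[dw] == 0) {			/* padding noop */
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != (0x22 << 23)) {
			/* not an LRI header: assume an LRI-like length field */
			dw += len + 2;
			continue;
		}

		dw++;					/* step past the header */
		len = (len + 1) / 2;			/* number of (reg, value) pairs */
		while (len--) {
			u32 reg = hw[dw++];		/* register offset */
			u32 val = hw[dw++];		/* value to be loaded */

			(void)reg;
			(void)val;			/* record or compare here */
		}
	}
}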
@ -1150,18 +1180,29 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
|
|||
hw = defaults;
|
||||
hw += LRC_STATE_OFFSET / sizeof(*hw);
|
||||
do {
|
||||
u32 len = hw[dw] & 0x7f;
|
||||
u32 len = hw[dw] & LRI_LENGTH_MASK;
|
||||
|
||||
/* For simplicity, break parsing at the first complex command */
|
||||
if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
|
||||
break;
|
||||
|
||||
if (hw[dw] == 0) {
|
||||
dw++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
|
||||
if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
|
||||
dw += len + 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!len) {
|
||||
pr_err("%s: invalid LRI found in context image\n",
|
||||
ce->engine->name);
|
||||
igt_hexdump(defaults, PAGE_SIZE);
|
||||
break;
|
||||
}
|
||||
|
||||
dw++;
|
||||
len = (len + 1) / 2;
|
||||
*cs++ = MI_LOAD_REGISTER_IMM(len);
|
||||
|
@ -1292,18 +1333,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
|
|||
hw = defaults;
|
||||
hw += LRC_STATE_OFFSET / sizeof(*hw);
|
||||
do {
|
||||
u32 len = hw[dw] & 0x7f;
|
||||
u32 len = hw[dw] & LRI_LENGTH_MASK;
|
||||
|
||||
/* For simplicity, break parsing at the first complex command */
|
||||
if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
|
||||
break;
|
||||
|
||||
if (hw[dw] == 0) {
|
||||
dw++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
|
||||
if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
|
||||
dw += len + 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!len) {
|
||||
pr_err("%s: invalid LRI found in context image\n",
|
||||
engine->name);
|
||||
igt_hexdump(defaults, PAGE_SIZE);
|
||||
break;
|
||||
}
|
||||
|
||||
dw++;
|
||||
len = (len + 1) / 2;
|
||||
while (len--) {
|
||||
|
@ -1343,6 +1395,30 @@ err_A0:
|
|||
return err;
|
||||
}
|
||||
|
||||
static struct i915_vma *
|
||||
create_result_vma(struct i915_address_space *vm, unsigned long sz)
|
||||
{
|
||||
struct i915_vma *vma;
|
||||
void *ptr;
|
||||
|
||||
vma = create_user_vma(vm, sz);
|
||||
if (IS_ERR(vma))
|
||||
return vma;
|
||||
|
||||
/* Set the results to a known value distinct from the poison */
|
||||
ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
|
||||
if (IS_ERR(ptr)) {
|
||||
i915_vma_put(vma);
|
||||
return ERR_CAST(ptr);
|
||||
}
|
||||
|
||||
memset(ptr, POISON_INUSE, vma->size);
|
||||
i915_gem_object_flush_map(vma->obj);
|
||||
i915_gem_object_unpin_map(vma->obj);
|
||||
|
||||
return vma;
|
||||
}
|
||||
|
||||
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
|
||||
{
|
||||
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
|
||||
|
@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
|
|||
goto err_A;
|
||||
}
|
||||
|
||||
ref[0] = create_user_vma(A->vm, SZ_64K);
|
||||
ref[0] = create_result_vma(A->vm, SZ_64K);
|
||||
if (IS_ERR(ref[0])) {
|
||||
err = PTR_ERR(ref[0]);
|
||||
goto err_B;
|
||||
}
|
||||
|
||||
ref[1] = create_user_vma(A->vm, SZ_64K);
|
||||
ref[1] = create_result_vma(A->vm, SZ_64K);
|
||||
if (IS_ERR(ref[1])) {
|
||||
err = PTR_ERR(ref[1]);
|
||||
goto err_ref0;
|
||||
|
@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
|
|||
}
|
||||
i915_request_put(rq);
|
||||
|
||||
result[0] = create_user_vma(A->vm, SZ_64K);
|
||||
result[0] = create_result_vma(A->vm, SZ_64K);
|
||||
if (IS_ERR(result[0])) {
|
||||
err = PTR_ERR(result[0]);
|
||||
goto err_ref1;
|
||||
}
|
||||
|
||||
result[1] = create_user_vma(A->vm, SZ_64K);
|
||||
result[1] = create_result_vma(A->vm, SZ_64K);
|
||||
if (IS_ERR(result[1])) {
|
||||
err = PTR_ERR(result[1]);
|
||||
goto err_result0;
|
||||
|
@ -1408,18 +1484,17 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
|
|||
}
|
||||
|
||||
err = poison_registers(B, poison, sema);
|
||||
if (err) {
|
||||
WRITE_ONCE(*sema, -1);
|
||||
i915_request_put(rq);
|
||||
goto err_result1;
|
||||
if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
|
||||
pr_err("%s(%s): wait for results timed out\n",
|
||||
__func__, engine->name);
|
||||
err = -ETIME;
|
||||
}
|
||||
|
||||
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
|
||||
i915_request_put(rq);
|
||||
err = -ETIME;
|
||||
goto err_result1;
|
||||
}
|
||||
/* Always cancel the semaphore wait, just in case the GPU gets stuck */
|
||||
WRITE_ONCE(*sema, -1);
|
||||
i915_request_put(rq);
|
||||
if (err)
|
||||
goto err_result1;
|
||||
|
||||
err = compare_isolation(engine, ref, result, A, poison);
|
||||
|
||||
|
|
|
@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc)
|
|||
|
||||
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
static void gen9_enable_guc_interrupts(struct intel_guc *guc)
|
||||
|
@ -93,11 +93,11 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
|
|||
|
||||
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
|
||||
gt->pm_guc_events);
|
||||
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
|
||||
|
@ -106,11 +106,11 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
|
|||
|
||||
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
|
||||
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
|
||||
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
intel_synchronize_irq(gt->i915);
|
||||
|
||||
gen9_reset_guc_interrupts(guc);
|
||||
|
@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
|
|||
{
|
||||
struct intel_gt *gt = guc_to_gt(guc);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
|
||||
|
@ -130,25 +130,25 @@ static void gen11_enable_guc_interrupts(struct intel_guc *guc)
|
|||
struct intel_gt *gt = guc_to_gt(guc);
|
||||
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN11_GUC_SG_INTR_ENABLE, events);
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN11_GUC_SG_INTR_MASK, ~events);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
|
||||
{
|
||||
struct intel_gt *gt = guc_to_gt(guc);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
|
||||
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
|
||||
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
|
||||
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
intel_synchronize_irq(gt->i915);
|
||||
|
||||
gen11_reset_guc_interrupts(guc);
|
||||
|
|
|
@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
|
|||
if (!guc_submission_initialized(guc))
|
||||
return;
|
||||
|
||||
cancel_delayed_work(&guc->timestamp.work);
|
||||
/*
|
||||
* There is a race with suspend flow where the worker runs after suspend
|
||||
* and causes an unclaimed register access warning. Cancel the worker
|
||||
* synchronously here.
|
||||
*/
|
||||
cancel_delayed_work_sync(&guc->timestamp.work);
|
||||
|
||||
/*
|
||||
* Before parking, we should sample engine busyness stats if we need to.
|
||||
|
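The intel_guc_busyness_park() change above works because cancel_delayed_work() only removes a not-yet-running work item and returns immediately if the handler has already started, which is exactly the window in which the worker can touch registers after suspend; cancel_delayed_work_sync() additionally waits for a running handler to finish. A generic illustration of the difference using the standard workqueue API (not i915-specific):

#include <linux/workqueue.h>

static void park_unsafe(struct delayed_work *dwork)
{
	/* async cancel: a handler that already started keeps running */
	cancel_delayed_work(dwork);
}

static void park_safe(struct delayed_work *dwork)
{
	/* sync cancel: waits for a running handler to complete first */
	cancel_delayed_work_sync(dwork);
}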
@ -1532,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
|
|||
__reset_guc_busyness_stats(guc);
|
||||
|
||||
/* Flush IRQ handler */
|
||||
spin_lock_irq(&guc_to_gt(guc)->irq_lock);
|
||||
spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
|
||||
spin_lock_irq(guc_to_gt(guc)->irq_lock);
|
||||
spin_unlock_irq(guc_to_gt(guc)->irq_lock);
|
||||
|
||||
guc_flush_submissions(guc);
|
||||
guc_flush_destroyed_contexts(guc);
|
||||
|
|
|
@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
|
|||
intel_guc_enable_interrupts(guc);
|
||||
|
||||
/* check for CT messages received before we enabled interrupts */
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
intel_guc_ct_event_handler(&guc->ct);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
drm_dbg(&i915->drm, "GuC communication enabled\n");
|
||||
|
||||
|
|
|
@ -72,12 +72,14 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
|
|||
* security fixes, etc. to be enabled.
|
||||
*/
|
||||
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
|
||||
fw_def(DG2, 0, guc_mmp(dg2, 70, 4, 1)) \
|
||||
fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \
|
||||
fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \
|
||||
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
|
||||
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
|
||||
fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \
|
||||
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
|
||||
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
|
||||
fw_def(DG1, 0, guc_mmp(dg1, 70, 1, 1)) \
|
||||
fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \
|
||||
fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
|
||||
fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
|
||||
fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
|
||||
|
@ -92,9 +94,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
|
|||
fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
|
||||
|
||||
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
|
||||
fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
|
||||
fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
|
||||
fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
|
||||
fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
|
||||
fw_def(DG1, 0, huc_mmp(dg1, 7, 9, 3)) \
|
||||
fw_def(DG1, 0, huc_raw(dg1)) \
|
||||
fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
|
||||
fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
|
||||
fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
|
||||
|
@ -232,6 +236,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
|
|||
u32 fw_count;
|
||||
u8 rev = INTEL_REVID(i915);
|
||||
int i;
|
||||
bool found;
|
||||
|
||||
/*
|
||||
* The only difference between the ADL GuC FWs is the HWConfig support.
|
||||
|
@ -246,6 +251,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
|
|||
fw_blobs = blobs_all[uc_fw->type].blobs;
|
||||
fw_count = blobs_all[uc_fw->type].count;
|
||||
|
||||
found = false;
|
||||
for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
|
||||
const struct uc_fw_blob *blob = &fw_blobs[i].blob;
|
||||
|
||||
|
@ -266,9 +272,15 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
|
|||
uc_fw->file_wanted.path = blob->path;
|
||||
uc_fw->file_wanted.major_ver = blob->major;
|
||||
uc_fw->file_wanted.minor_ver = blob->minor;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!found && uc_fw->file_selected.path) {
|
||||
/* Failed to find a match for the last attempt?! */
|
||||
uc_fw->file_selected.path = NULL;
|
||||
}
|
||||
|
||||
/* make sure the list is ordered as expected */
|
||||
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
|
||||
verified = true;
|
||||
|
@ -322,7 +334,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
|
|||
continue;
|
||||
|
||||
bad:
|
||||
drm_err(&i915->drm, "\x1B[35;1mInvalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
|
||||
drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
|
||||
intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
|
||||
fw_blobs[i - 1].blob.legacy ? "L" : "v",
|
||||
fw_blobs[i - 1].blob.major,
|
||||
|
@ -553,10 +565,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
|
|||
|
||||
err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
|
||||
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
|
||||
if (!err || intel_uc_fw_is_overridden(uc_fw))
|
||||
goto done;
|
||||
|
||||
/* Any error is terminal if overriding. Don't bother searching for older versions */
|
||||
if (err && intel_uc_fw_is_overridden(uc_fw))
|
||||
goto fail;
|
||||
|
||||
while (err == -ENOENT) {
|
||||
old_ver = true;
|
||||
|
||||
__uc_fw_auto_select(i915, uc_fw);
|
||||
if (!uc_fw->file_selected.path) {
|
||||
/*
|
||||
|
@ -576,8 +592,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
|
|||
if (err)
|
||||
goto fail;
|
||||
|
||||
old_ver = true;
|
||||
done:
|
||||
if (uc_fw->loaded_via_gsc)
|
||||
err = check_gsc_manifest(fw, uc_fw);
|
||||
else
|
||||
|
|
|
@ -105,6 +105,12 @@ static const char irst_name[] = "INT3392";
|
|||
|
||||
static const struct drm_driver i915_drm_driver;
|
||||
|
||||
static void i915_release_bridge_dev(struct drm_device *dev,
|
||||
void *bridge)
|
||||
{
|
||||
pci_dev_put(bridge);
|
||||
}
|
||||
|
||||
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
|
||||
|
@ -115,7 +121,9 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
|
|||
drm_err(&dev_priv->drm, "bridge device not found\n");
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
|
||||
return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
|
||||
dev_priv->bridge_dev);
|
||||
}
|
||||
|
||||
/* Allocate space for the MCH regs if needed, return nonzero on error */
|
||||
|
@ -302,8 +310,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
|
|||
|
||||
static void sanitize_gpu(struct drm_i915_private *i915)
|
||||
{
|
||||
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
|
||||
__intel_gt_reset(to_gt(i915), ALL_ENGINES);
|
||||
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
|
||||
struct intel_gt *gt;
|
||||
unsigned int i;
|
||||
|
||||
for_each_gt(gt, i915, i)
|
||||
__intel_gt_reset(gt, ALL_ENGINES);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -326,7 +339,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
|
|||
intel_device_info_subplatform_init(dev_priv);
|
||||
intel_step_init(dev_priv);
|
||||
|
||||
intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
|
||||
intel_uncore_mmio_debug_init_early(dev_priv);
|
||||
|
||||
spin_lock_init(&dev_priv->irq_lock);
|
||||
spin_lock_init(&dev_priv->gpu_error.lock);
|
||||
|
@ -357,7 +370,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
|
|||
|
||||
intel_wopcm_init_early(&dev_priv->wopcm);
|
||||
|
||||
intel_root_gt_init_early(dev_priv);
|
||||
ret = intel_root_gt_init_early(dev_priv);
|
||||
if (ret < 0)
|
||||
goto err_rootgt;
|
||||
|
||||
i915_drm_clients_init(&dev_priv->clients, dev_priv);
|
||||
|
||||
|
@ -382,6 +397,7 @@ err_gem:
|
|||
i915_gem_cleanup_early(dev_priv);
|
||||
intel_gt_driver_late_release_all(dev_priv);
|
||||
i915_drm_clients_fini(&dev_priv->clients);
|
||||
err_rootgt:
|
||||
intel_region_ttm_device_fini(dev_priv);
|
||||
err_ttm:
|
||||
vlv_suspend_cleanup(dev_priv);
|
||||
|
@ -423,7 +439,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int ret;
|
||||
struct intel_gt *gt;
|
||||
int ret, i;
|
||||
|
||||
if (i915_inject_probe_failure(dev_priv))
|
||||
return -ENODEV;
|
||||
|
@ -432,17 +449,27 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = intel_uncore_init_mmio(&dev_priv->uncore);
|
||||
if (ret)
|
||||
return ret;
|
||||
for_each_gt(gt, dev_priv, i) {
|
||||
ret = intel_uncore_init_mmio(gt->uncore);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drmm_add_action_or_reset(&dev_priv->drm,
|
||||
intel_uncore_fini_mmio,
|
||||
gt->uncore);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Try to make sure MCHBAR is enabled before poking at it */
|
||||
intel_setup_mchbar(dev_priv);
|
||||
intel_device_info_runtime_init(dev_priv);
|
||||
|
||||
ret = intel_gt_init_mmio(to_gt(dev_priv));
|
||||
if (ret)
|
||||
goto err_uncore;
|
||||
for_each_gt(gt, dev_priv, i) {
|
||||
ret = intel_gt_init_mmio(gt);
|
||||
if (ret)
|
||||
goto err_uncore;
|
||||
}
|
||||
|
||||
/* As early as possible, scrub existing GPU state before clobbering */
|
||||
sanitize_gpu(dev_priv);
|
||||
|
@ -451,8 +478,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
|
|||
|
||||
err_uncore:
|
||||
intel_teardown_mchbar(dev_priv);
|
||||
intel_uncore_fini_mmio(&dev_priv->uncore);
|
||||
pci_dev_put(dev_priv->bridge_dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -464,8 +489,6 @@ err_uncore:
|
|||
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_teardown_mchbar(dev_priv);
|
||||
intel_uncore_fini_mmio(&dev_priv->uncore);
|
||||
pci_dev_put(dev_priv->bridge_dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -715,6 +738,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
|
|||
static void i915_driver_register(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_gt *gt;
|
||||
unsigned int i;
|
||||
|
||||
i915_gem_driver_register(dev_priv);
|
||||
i915_pmu_register(dev_priv);
|
||||
|
@ -734,7 +759,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
|
|||
/* Depends on sysfs having been initialized */
|
||||
i915_perf_register(dev_priv);
|
||||
|
||||
intel_gt_driver_register(to_gt(dev_priv));
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_gt_driver_register(gt);
|
||||
|
||||
intel_display_driver_register(dev_priv);
|
||||
|
||||
|
@ -753,6 +779,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_gt *gt;
|
||||
unsigned int i;
|
||||
|
||||
i915_switcheroo_unregister(dev_priv);
|
||||
|
||||
intel_unregister_dsm_handler();
|
||||
|
@ -762,7 +791,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
|
|||
|
||||
intel_display_driver_unregister(dev_priv);
|
||||
|
||||
intel_gt_driver_unregister(to_gt(dev_priv));
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_gt_driver_unregister(gt);
|
||||
|
||||
i915_perf_unregister(dev_priv);
|
||||
i915_pmu_unregister(dev_priv);
|
||||
|
@ -784,6 +814,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
if (drm_debug_enabled(DRM_UT_DRIVER)) {
|
||||
struct drm_printer p = drm_debug_printer("i915 device info:");
|
||||
struct intel_gt *gt;
|
||||
unsigned int i;
|
||||
|
||||
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
|
||||
INTEL_DEVID(dev_priv),
|
||||
|
@ -796,7 +828,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
|
|||
intel_device_info_print(INTEL_INFO(dev_priv),
|
||||
RUNTIME_INFO(dev_priv), &p);
|
||||
i915_print_iommu_status(dev_priv, &p);
|
||||
intel_gt_info_print(&to_gt(dev_priv)->info, &p);
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_gt_info_print(&gt->info, &p);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
|
||||
|
@ -1211,13 +1244,15 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
|
|||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
|
||||
int ret;
|
||||
struct intel_gt *gt;
|
||||
int ret, i;
|
||||
|
||||
disable_rpm_wakeref_asserts(rpm);
|
||||
|
||||
i915_gem_suspend_late(dev_priv);
|
||||
|
||||
intel_uncore_suspend(&dev_priv->uncore);
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_uncore_suspend(gt->uncore);
|
||||
|
||||
intel_power_domains_suspend(dev_priv,
|
||||
get_suspend_mode(dev_priv, hibernation));
|
||||
|
@ -1349,7 +1384,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
int ret;
|
||||
struct intel_gt *gt;
|
||||
int ret, i;
|
||||
|
||||
/*
|
||||
* We have a resume ordering issue with the snd-hda driver also
|
||||
|
@ -1403,9 +1439,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
|||
drm_err(&dev_priv->drm,
|
||||
"Resume prepare failed: %d, continuing anyway\n", ret);
|
||||
|
||||
intel_uncore_resume_early(&dev_priv->uncore);
|
||||
|
||||
intel_gt_check_and_clear_faults(to_gt(dev_priv));
|
||||
for_each_gt(gt, dev_priv, i) {
|
||||
intel_uncore_resume_early(gt->uncore);
|
||||
intel_gt_check_and_clear_faults(gt);
|
||||
}
|
||||
|
||||
intel_display_power_resume_early(dev_priv);
|
||||
|
||||
|
@ -1585,7 +1622,8 @@ static int intel_runtime_suspend(struct device *kdev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
|
||||
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
|
||||
int ret;
|
||||
struct intel_gt *gt;
|
||||
int ret, i;
|
||||
|
||||
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
|
||||
return -ENODEV;
|
||||
|
@ -1600,11 +1638,13 @@ static int intel_runtime_suspend(struct device *kdev)
|
|||
*/
|
||||
i915_gem_runtime_suspend(dev_priv);
|
||||
|
||||
intel_gt_runtime_suspend(to_gt(dev_priv));
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_gt_runtime_suspend(gt);
|
||||
|
||||
intel_runtime_pm_disable_interrupts(dev_priv);
|
||||
|
||||
intel_uncore_suspend(&dev_priv->uncore);
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_uncore_suspend(gt->uncore);
|
||||
|
||||
intel_display_power_suspend(dev_priv);
|
||||
|
||||
|
@ -1668,7 +1708,8 @@ static int intel_runtime_resume(struct device *kdev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
|
||||
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
|
||||
int ret;
|
||||
struct intel_gt *gt;
|
||||
int ret, i;
|
||||
|
||||
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
|
||||
return -ENODEV;
|
||||
|
@ -1688,7 +1729,8 @@ static int intel_runtime_resume(struct device *kdev)
|
|||
|
||||
ret = vlv_resume_prepare(dev_priv, true);
|
||||
|
||||
intel_uncore_runtime_resume(&dev_priv->uncore);
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_uncore_runtime_resume(gt->uncore);
|
||||
|
||||
intel_runtime_pm_enable_interrupts(dev_priv);
|
||||
|
||||
|
@ -1696,7 +1738,8 @@ static int intel_runtime_resume(struct device *kdev)
|
|||
* No point of rolling back things in case of an error, as the best
|
||||
* we can do is to hope that things will still work (and disable RPM).
|
||||
*/
|
||||
intel_gt_runtime_resume(to_gt(dev_priv));
|
||||
for_each_gt(gt, dev_priv, i)
|
||||
intel_gt_runtime_resume(gt);
|
||||
|
||||
/*
|
||||
* On VLV/CHV display interrupts are part of the display
|
||||
|
|
|
@ -497,6 +497,9 @@ struct drm_i915_private {
|
|||
|
||||
struct kobject *sysfs_gt;
|
||||
|
||||
/* Quick lookup of media GT (current platforms only have one) */
|
||||
struct intel_gt *media_gt;
|
||||
|
||||
struct {
|
||||
struct i915_gem_contexts {
|
||||
spinlock_t lock; /* locks list */
|
||||
|
@ -1061,6 +1064,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
|||
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
|
||||
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
|
||||
|
||||
#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)
|
||||
|
||||
/*
|
||||
* Platform has the dedicated compression control state for each lmem surfaces
|
||||
* stored in lmem to support the 3D and media compression formats.
|
||||
|
|
|
@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
|
|||
&to_gt(i915)->ggtt->userfault_list, userfault_link)
|
||||
__i915_gem_object_release_mmap_gtt(obj);
|
||||
|
||||
list_for_each_entry_safe(obj, on,
|
||||
&to_gt(i915)->lmem_userfault_list, userfault_link)
|
||||
i915_gem_object_runtime_pm_release_mmap_offset(obj);
|
||||
|
||||
/*
|
||||
* The fence will be lost when the device powers down. If any were
|
||||
* in use by hardware (i.e. they are pinned), we should not be powering
|
||||
|
@ -1172,7 +1176,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
|
|||
|
||||
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
|
||||
intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
|
||||
|
||||
i915_gem_suspend_late(dev_priv);
|
||||
intel_gt_driver_remove(to_gt(dev_priv));
|
||||
|
|
|
@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work)
|
|||
|
||||
out:
|
||||
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
|
|
@ -26,6 +26,9 @@
|
|||
#include <drm/drm_drv.h>
|
||||
#include <drm/i915_pciids.h>
|
||||
|
||||
#include "gt/intel_gt_regs.h"
|
||||
#include "gt/intel_sa_media.h"
|
||||
|
||||
#include "i915_driver.h"
|
||||
#include "i915_drv.h"
|
||||
#include "i915_pci.h"
|
||||
|
@ -1115,6 +1118,16 @@ static const struct intel_device_info pvc_info = {
|
|||
.display.has_cdclk_crawl = 1, \
|
||||
.__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B)
|
||||
|
||||
static const struct intel_gt_definition xelpmp_extra_gt[] = {
|
||||
{
|
||||
.type = GT_MEDIA,
|
||||
.name = "Standalone Media GT",
|
||||
.gsi_offset = MTL_MEDIA_GSI_BASE,
|
||||
.engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
__maybe_unused
|
||||
static const struct intel_device_info mtl_info = {
|
||||
XE_HP_FEATURES,
|
||||
|
@ -1128,6 +1141,7 @@ static const struct intel_device_info mtl_info = {
|
|||
.media.ver = 13,
|
||||
PLATFORM(INTEL_METEORLAKE),
|
||||
.display.has_modular_fia = 1,
|
||||
.extra_gt_list = xelpmp_extra_gt,
|
||||
.has_flat_ccs = 0,
|
||||
.has_snoop = 1,
|
||||
.__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
|
||||
|
|
|
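The new extra_gt_list entry ties mtl_info to the standalone media code added earlier in this series: at probe time the driver is expected to walk the list and bring up one additional GT per entry, using the entry's gsi_offset for a GT_MEDIA type. A hypothetical sketch of that consumer loop; every name not visible in this diff (the function itself, the allocation, the error handling) is an assumption:

/* Sketch: how a probe path might consume intel_gt_definition entries. */
static int probe_extra_gts(struct drm_i915_private *i915, phys_addr_t phys_addr)
{
	const struct intel_gt_definition *gtdef;
	struct intel_gt *gt;
	int ret;

	for (gtdef = INTEL_INFO(i915)->extra_gt_list; gtdef && gtdef->name; gtdef++) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt)
			return -ENOMEM;

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->info.engine_mask = gtdef->engine_mask;

		switch (gtdef->type) {
		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr,
						     gtdef->gsi_offset);
			break;
		default:
			ret = -ENODEV;
			break;
		}
		if (ret)
			return ret;
	}

	return 0;
}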
@ -1857,14 +1857,14 @@
|
|||
|
||||
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
|
||||
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
|
||||
#define PROCHOT_MASK REG_BIT(1)
|
||||
#define THERMAL_LIMIT_MASK REG_BIT(2)
|
||||
#define RATL_MASK REG_BIT(6)
|
||||
#define VR_THERMALERT_MASK REG_BIT(7)
|
||||
#define VR_TDC_MASK REG_BIT(8)
|
||||
#define POWER_LIMIT_4_MASK REG_BIT(9)
|
||||
#define POWER_LIMIT_1_MASK REG_BIT(11)
|
||||
#define POWER_LIMIT_2_MASK REG_BIT(12)
|
||||
#define PROCHOT_MASK REG_BIT(0)
|
||||
#define THERMAL_LIMIT_MASK REG_BIT(1)
|
||||
#define RATL_MASK REG_BIT(5)
|
||||
#define VR_THERMALERT_MASK REG_BIT(6)
|
||||
#define VR_TDC_MASK REG_BIT(7)
|
||||
#define POWER_LIMIT_4_MASK REG_BIT(8)
|
||||
#define POWER_LIMIT_1_MASK REG_BIT(10)
|
||||
#define POWER_LIMIT_2_MASK REG_BIT(11)
|
||||
|
||||
#define CHV_CLK_CTL1 _MMIO(0x101100)
|
||||
#define VLV_CLK_CTL2 _MMIO(0x101104)
|
||||
|
|
|
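The corrected GT0_PERF_LIMIT_REASONS bit positions now line up with the existing GT0_PERF_LIMIT_REASONS_MASK of 0xde3 (bits 0, 1, 5-8, 10 and 11). As a worked example, a raw readout of 0x401 decodes under the new definitions to PROCHOT plus Power Limit 1, whereas the old off-by-one bits would have matched nothing. A small illustrative decoder (not part of the patch):

/* Illustrative decode of GT0_PERF_LIMIT_REASONS with the corrected bits. */
static void decode_limit_reasons(struct drm_printer *p, u32 reasons)
{
	reasons &= GT0_PERF_LIMIT_REASONS_MASK;		/* 0xde3 */

	if (reasons & PROCHOT_MASK)			/* bit 0 */
		drm_printf(p, "PROCHOT\n");
	if (reasons & THERMAL_LIMIT_MASK)		/* bit 1 */
		drm_printf(p, "thermal limit\n");
	if (reasons & RATL_MASK)			/* bit 5 */
		drm_printf(p, "RATL\n");
	if (reasons & VR_THERMALERT_MASK)		/* bit 6 */
		drm_printf(p, "VR THERMALERT\n");
	if (reasons & VR_TDC_MASK)			/* bit 7 */
		drm_printf(p, "VR TDC\n");
	if (reasons & POWER_LIMIT_4_MASK)		/* bit 8 */
		drm_printf(p, "power limit 4\n");
	if (reasons & POWER_LIMIT_1_MASK)		/* bit 10 */
		drm_printf(p, "power limit 1\n");
	if (reasons & POWER_LIMIT_2_MASK)		/* bit 11 */
		drm_printf(p, "power limit 2\n");
}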
@ -37,6 +37,7 @@
|
|||
|
||||
struct drm_printer;
|
||||
struct drm_i915_private;
|
||||
struct intel_gt_definition;
|
||||
|
||||
/* Keep in gen based order, and chronological order within a gen */
|
||||
enum intel_platform {
|
||||
|
@ -252,6 +253,8 @@ struct intel_device_info {
|
|||
|
||||
unsigned int dma_mask_size; /* available DMA address bits */
|
||||
|
||||
const struct intel_gt_definition *extra_gt_list;
|
||||
|
||||
u8 gt; /* GT number, 0 if undefined */
|
||||
|
||||
#define DEFINE_FLAG(name) u8 name:1
|
||||
|
|
|
@ -7614,9 +7614,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
|
||||
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
|
||||
if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
|
||||
IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
|
||||
/* Wa_1409120013 */
|
||||
if (DISPLAY_VER(dev_priv) == 12)
|
||||
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
|
||||
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <drm/drm_managed.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include "gt/intel_engine_regs.h"
|
||||
|
@ -44,29 +45,47 @@ fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
|
|||
}
|
||||
|
||||
void
|
||||
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
|
||||
intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
|
||||
{
|
||||
spin_lock_init(&mmio_debug->lock);
|
||||
mmio_debug->unclaimed_mmio_check = 1;
|
||||
spin_lock_init(&i915->mmio_debug.lock);
|
||||
i915->mmio_debug.unclaimed_mmio_check = 1;
|
||||
|
||||
i915->uncore.debug = &i915->mmio_debug;
|
||||
}
|
||||
|
||||
static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
|
||||
static void mmio_debug_suspend(struct intel_uncore *uncore)
|
||||
{
|
||||
lockdep_assert_held(&mmio_debug->lock);
|
||||
if (!uncore->debug)
|
||||
return;
|
||||
|
||||
spin_lock(&uncore->debug->lock);
|
||||
|
||||
/* Save and disable mmio debugging for the user bypass */
|
||||
if (!mmio_debug->suspend_count++) {
|
||||
mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
|
||||
mmio_debug->unclaimed_mmio_check = 0;
|
||||
if (!uncore->debug->suspend_count++) {
|
||||
uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
|
||||
uncore->debug->unclaimed_mmio_check = 0;
|
||||
}
|
||||
|
||||
spin_unlock(&uncore->debug->lock);
|
||||
}
|
||||
|
||||
static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
|
||||
{
|
||||
lockdep_assert_held(&mmio_debug->lock);
|
||||
static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
|
||||
|
||||
if (!--mmio_debug->suspend_count)
|
||||
mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
|
||||
static void mmio_debug_resume(struct intel_uncore *uncore)
|
||||
{
|
||||
if (!uncore->debug)
|
||||
return;
|
||||
|
||||
spin_lock(&uncore->debug->lock);
|
||||
|
||||
if (!--uncore->debug->suspend_count)
|
||||
uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
|
||||
|
||||
if (check_for_unclaimed_mmio(uncore))
|
||||
drm_info(&uncore->i915->drm,
|
||||
"Invalid mmio detected during user access\n");
|
||||
|
||||
spin_unlock(&uncore->debug->lock);
|
||||
}
|
||||
|
||||
static const char * const forcewake_domain_names[] = {
|
||||
|
@ -677,9 +696,7 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
|
|||
spin_lock_irq(&uncore->lock);
|
||||
if (!uncore->user_forcewake_count++) {
|
||||
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
|
||||
spin_lock(&uncore->debug->lock);
|
||||
mmio_debug_suspend(uncore->debug);
|
||||
spin_unlock(&uncore->debug->lock);
|
||||
mmio_debug_suspend(uncore);
|
||||
}
|
||||
spin_unlock_irq(&uncore->lock);
|
||||
}
|
||||
|
@ -695,14 +712,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
|
|||
{
|
||||
spin_lock_irq(&uncore->lock);
|
||||
if (!--uncore->user_forcewake_count) {
|
||||
spin_lock(&uncore->debug->lock);
|
||||
mmio_debug_resume(uncore->debug);
|
||||
|
||||
if (check_for_unclaimed_mmio(uncore))
|
||||
drm_info(&uncore->i915->drm,
|
||||
"Invalid mmio detected during user access\n");
|
||||
spin_unlock(&uncore->debug->lock);
|
||||
|
||||
mmio_debug_resume(uncore);
|
||||
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
|
||||
}
|
||||
spin_unlock_irq(&uncore->lock);
|
||||
|
@ -918,6 +928,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
|
|||
{
|
||||
const struct intel_forcewake_range *entry;
|
||||
|
||||
if (IS_GSI_REG(offset))
|
||||
offset += uncore->gsi_offset;
|
||||
|
||||
entry = BSEARCH(offset,
|
||||
uncore->fw_domains_table,
|
||||
uncore->fw_domains_table_entries,
|
||||
|
@ -1133,6 +1146,9 @@ static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
|
|||
if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
|
||||
return false;
|
||||
|
||||
if (IS_GSI_REG(offset))
|
||||
offset += uncore->gsi_offset;
|
||||
|
||||
return BSEARCH(offset,
|
||||
uncore->shadowed_reg_table,
|
||||
uncore->shadowed_reg_table_entries,
|
||||
|
@ -1704,7 +1720,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
|
|||
const bool read,
|
||||
const bool before)
|
||||
{
|
||||
if (likely(!uncore->i915->params.mmio_debug))
|
||||
if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
|
||||
return;
|
||||
|
||||
/* interrupts are disabled and re-enabled around uncore->lock usage */
|
||||
|
@ -1985,8 +2001,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
|
|||
|
||||
d->uncore = uncore;
|
||||
d->wake_count = 0;
|
||||
d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
|
||||
d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
|
||||
d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
|
||||
d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
|
||||
|
||||
d->id = domain_id;
|
||||
|
||||
|
@ -2223,6 +2239,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
|
|||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
|
||||
{
|
||||
iounmap(regs);
|
||||
}
|
||||
|
||||
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
|
||||
{
|
||||
struct drm_i915_private *i915 = uncore->i915;
|
||||
|
@ -2251,12 +2272,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
|
||||
{
|
||||
iounmap(uncore->regs);
|
||||
return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
|
||||
}
|
||||
|
||||
void intel_uncore_init_early(struct intel_uncore *uncore,
|
||||
|
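Dropping intel_uncore_cleanup_mmio() in favour of drmm_add_action_or_reset() hands the iounmap to a drm-managed release action: the helper registers the callback to run when the drm_device is finally released, and if the registration itself fails it invokes the callback immediately and returns the error, so callers need no manual unwind path. A generic sketch of the pattern with illustrative names:

#include <drm/drm_managed.h>

static void example_unmap(struct drm_device *drm, void *regs)
{
	/* runs at final drm_device release, or at once if registration failed */
	iounmap((void __iomem *)regs);
}

static int example_map(struct drm_device *drm, phys_addr_t phys, size_t len,
		       void __iomem **out)
{
	void __iomem *regs = ioremap(phys, len);

	if (!regs)
		return -EIO;

	*out = regs;
	return drmm_add_action_or_reset(drm, example_unmap, (void *)regs);
}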
@ -2266,7 +2282,6 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
|
|||
uncore->i915 = gt->i915;
|
||||
uncore->gt = gt;
|
||||
uncore->rpm = &gt->i915->runtime_pm;
|
||||
uncore->debug = &gt->i915->mmio_debug;
|
||||
}
|
||||
|
||||
static void uncore_raw_init(struct intel_uncore *uncore)
|
||||
|
@ -2446,8 +2461,11 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
|
|||
}
|
||||
}
|
||||
|
||||
void intel_uncore_fini_mmio(struct intel_uncore *uncore)
|
||||
/* Called via drm-managed action */
|
||||
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
|
||||
{
|
||||
struct intel_uncore *uncore = data;
|
||||
|
||||
if (intel_uncore_has_forcewake(uncore)) {
|
||||
iosf_mbi_punit_acquire();
|
||||
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
|
||||
|
@ -2577,6 +2595,9 @@ bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
|
|||
{
|
||||
bool ret;
|
||||
|
||||
if (!uncore->debug)
|
||||
return false;
|
||||
|
||||
spin_lock_irq(&uncore->debug->lock);
|
||||
ret = check_for_unclaimed_mmio(uncore);
|
||||
spin_unlock_irq(&uncore->debug->lock);
|
||||
|
@ -2589,6 +2610,9 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
|
|||
{
|
||||
bool ret = false;
|
||||
|
||||
if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
|
||||
return false;
|
||||
|
||||
spin_lock_irq(&uncore->debug->lock);
|
||||
|
||||
if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
|
||||
#include "i915_reg_defs.h"
|
||||
|
||||
struct drm_device;
|
||||
struct drm_i915_private;
|
||||
struct intel_runtime_pm;
|
||||
struct intel_uncore;
|
||||
|
@ -135,6 +136,16 @@ struct intel_uncore {
|
|||
|
||||
spinlock_t lock; /** lock is also taken in irq contexts. */
|
||||
|
||||
/*
|
||||
* Do we need to apply an additional offset to reach the beginning
|
||||
* of the basic non-engine GT registers (referred to as "GSI" on
|
||||
* newer platforms, or "GT block" on older platforms)? If so, we'll
|
||||
* track that here and apply it transparently to registers in the
|
||||
* appropriate range to maintain compatibility with our existing
|
||||
* register definitions and GT code.
|
||||
*/
|
||||
u32 gsi_offset;
|
||||
|
||||
unsigned int flags;
|
||||
#define UNCORE_HAS_FORCEWAKE BIT(0)
|
||||
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
|
||||
|
@ -210,8 +221,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
|
|||
return uncore->flags & UNCORE_HAS_FIFO;
|
||||
}
|
||||
|
||||
void
|
||||
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
|
||||
void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
|
||||
void intel_uncore_init_early(struct intel_uncore *uncore,
|
||||
struct intel_gt *gt);
|
||||
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
|
||||
|
@ -221,7 +231,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
|
|||
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
|
||||
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
|
||||
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
|
||||
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
|
||||
void intel_uncore_fini_mmio(struct drm_device *dev, void *data);
|
||||
void intel_uncore_suspend(struct intel_uncore *uncore);
|
||||
void intel_uncore_resume_early(struct intel_uncore *uncore);
|
||||
void intel_uncore_runtime_resume(struct intel_uncore *uncore);
|
||||
|
@ -294,19 +304,27 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
|
|||
2, timeout_ms, NULL);
|
||||
}
|
||||
|
||||
#define IS_GSI_REG(reg) ((reg) < 0x40000)
|
||||
|
||||
/* register access functions */
|
||||
#define __raw_read(x__, s__) \
|
||||
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
|
||||
i915_reg_t reg) \
|
||||
{ \
|
||||
return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
|
||||
u32 offset = i915_mmio_reg_offset(reg); \
|
||||
if (IS_GSI_REG(offset)) \
|
||||
offset += uncore->gsi_offset; \
|
||||
return read##s__(uncore->regs + offset); \
|
||||
}
|
||||
|
||||
#define __raw_write(x__, s__) \
|
||||
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
|
||||
i915_reg_t reg, u##x__ val) \
|
||||
{ \
|
||||
write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
|
||||
u32 offset = i915_mmio_reg_offset(reg); \
|
||||
if (IS_GSI_REG(offset)) \
|
||||
offset += uncore->gsi_offset; \
|
||||
write##s__(val, uncore->regs + offset); \
|
||||
}
|
||||
__raw_read(8, b)
|
||||
__raw_read(16, w)
|
||||
|
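With IS_GSI_REG() and the gsi_offset handling above, any access to a GT register below 0x40000 made through the media GT's uncore is transparently relocated into that GT's slice of the shared BAR, while engine registers and accesses through the primary uncore are untouched. A worked example, assuming a media gsi_offset of 0x380000 (the MTL_MEDIA_GSI_BASE value referenced in the PCI tables earlier in this series):

/*
 * Hypothetical translation with gsi_offset = 0x380000:
 *
 *   primary uncore:  read(0x00d00)  -> BAR + 0x00d00
 *   media uncore:    read(0x00d00)  -> BAR + 0x380d00   (GSI register)
 *   media uncore:    read(0x1c0000) -> BAR + 0x1c0000   (engine reg, >= 0x40000)
 */
static u32 media_gsi_read_example(struct intel_uncore *media_uncore)
{
	/* the generated accessor adds media_uncore->gsi_offset internally */
	return __raw_uncore_read32(media_uncore, _MMIO(0xd00));
}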
@ -447,6 +465,18 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
|
|||
return (reg_val & mask) != expected_val ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The raw_reg_{read,write} macros are intended as a micro-optimization for
|
||||
* interrupt handlers so that the pointer indirection on uncore->regs can
|
||||
* be computed once (and presumably cached in a register) instead of generating
|
||||
* extra load instructions for each MMIO access.
|
||||
*
|
||||
* Given that these macros are only intended for non-GSI interrupt registers
|
||||
* (and the goal is to avoid extra instructions generated by the compiler),
|
||||
* these macros do not account for uncore->gsi_offset. Any caller that needs
|
||||
* to use these macros on a GSI register is responsible for adding the
|
||||
* appropriate GSI offset to the 'base' parameter.
|
||||
*/
|
||||
#define raw_reg_read(base, reg) \
|
||||
readl(base + i915_mmio_reg_offset(reg))
|
||||
#define raw_reg_write(base, reg, value) \
|
||||
|
|
|
@ -169,11 +169,11 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
|
|||
* We want to get the same effect as if we received a termination
|
||||
* interrupt, so just pretend that we did.
|
||||
*/
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
intel_pxp_mark_termination_in_progress(pxp);
|
||||
pxp->session_events |= PXP_TERMINATION_REQUEST;
|
||||
queue_work(system_unbound_wq, &pxp->session_work);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
static bool pxp_component_bound(struct intel_pxp *pxp)
|
||||
|
|
|
@ -47,9 +47,9 @@ static int pxp_terminate_set(void *data, u64 val)
|
|||
return -ENODEV;
|
||||
|
||||
/* simulate a termination interrupt */
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
if (!wait_for_completion_timeout(&pxp->termination,
|
||||
msecs_to_jiffies(100)))
|
||||
|
|
|
@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
|
|||
if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
|
||||
return;
|
||||
|
||||
lockdep_assert_held(&gt->irq_lock);
|
||||
lockdep_assert_held(gt->irq_lock);
|
||||
|
||||
if (unlikely(!iir))
|
||||
return;
|
||||
|
@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
|
|||
|
||||
static inline void pxp_irq_reset(struct intel_gt *gt)
|
||||
{
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
void intel_pxp_irq_enable(struct intel_pxp *pxp)
|
||||
{
|
||||
struct intel_gt *gt = pxp_to_gt(pxp);
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
|
||||
if (!pxp->irq_enabled)
|
||||
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
|
||||
|
@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp)
|
|||
__pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
|
||||
pxp->irq_enabled = true;
|
||||
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
}
|
||||
|
||||
void intel_pxp_irq_disable(struct intel_pxp *pxp)
|
||||
|
@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp)
|
|||
*/
|
||||
GEM_WARN_ON(intel_pxp_is_active(pxp));
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
|
||||
pxp->irq_enabled = false;
|
||||
__pxp_set_interrupts(gt, 0);
|
||||
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
intel_synchronize_irq(gt->i915);
|
||||
|
||||
pxp_irq_reset(gt);
|
||||
|
|
|
@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work)
|
|||
intel_wakeref_t wakeref;
|
||||
u32 events = 0;
|
||||
|
||||
spin_lock_irq(&gt->irq_lock);
|
||||
spin_lock_irq(gt->irq_lock);
|
||||
events = fetch_and_zero(&pxp->session_events);
|
||||
spin_unlock_irq(&gt->irq_lock);
|
||||
spin_unlock_irq(gt->irq_lock);
|
||||
|
||||
if (!events)
|
||||
return;
|
||||
|
|
|
@ -115,6 +115,7 @@ static struct dev_pm_domain pm_domain = {
|
|||
static void mock_gt_probe(struct drm_i915_private *i915)
|
||||
{
|
||||
i915->gt[0] = &i915->gt0;
|
||||
i915->gt[0]->name = "Mock GT";
|
||||
}
|
||||
|
||||
struct drm_i915_private *mock_gem_device(void)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
|
||||
* Intel Management Engine Interface (Intel MEI) Linux driver
|
||||
*/
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include "mei_dev.h"
|
||||
#include "client.h"
|
||||
#include "mkhi.h"
|
||||
|
||||
#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
|
||||
0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
|
||||
|
@ -89,20 +90,6 @@ struct mei_os_ver {
|
|||
u8 reserved2;
|
||||
} __packed;
|
||||
|
||||
#define MKHI_FEATURE_PTT 0x10
|
||||
|
||||
struct mkhi_rule_id {
|
||||
__le16 rule_type;
|
||||
u8 feature_id;
|
||||
u8 reserved;
|
||||
} __packed;
|
||||
|
||||
struct mkhi_fwcaps {
|
||||
struct mkhi_rule_id id;
|
||||
u8 len;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct mkhi_fw_ver_block {
|
||||
u16 minor;
|
||||
u8 major;
|
||||
|
@ -115,22 +102,6 @@ struct mkhi_fw_ver {
|
|||
struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS];
|
||||
} __packed;
|
||||
|
||||
#define MKHI_FWCAPS_GROUP_ID 0x3
|
||||
#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
|
||||
#define MKHI_GEN_GROUP_ID 0xFF
|
||||
#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
|
||||
struct mkhi_msg_hdr {
|
||||
u8 group_id;
|
||||
u8 command;
|
||||
u8 reserved;
|
||||
u8 result;
|
||||
} __packed;
|
||||
|
||||
struct mkhi_msg {
|
||||
struct mkhi_msg_hdr hdr;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
|
||||
sizeof(struct mkhi_fwcaps) + \
|
||||
sizeof(struct mei_os_ver))
|
||||
|
@ -164,7 +135,6 @@ static int mei_osver(struct mei_cl_device *cldev)
|
|||
sizeof(struct mkhi_fw_ver))
|
||||
#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
|
||||
sizeof(struct mkhi_fw_ver_block) * (__num))
|
||||
#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
|
||||
static int mei_fwver(struct mei_cl_device *cldev)
|
||||
{
|
||||
char buf[MKHI_FWVER_BUF_LEN];
|
||||
|
@ -187,7 +157,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
|
|||
|
||||
ret = 0;
|
||||
bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0,
|
||||
MKHI_RCV_TIMEOUT);
|
||||
cldev->bus->timeouts.mkhi_recv);
|
||||
if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
|
||||
/*
|
||||
* Should be at least one version block,
|
||||
|
@ -218,6 +188,19 @@ static int mei_fwver(struct mei_cl_device *cldev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int mei_gfx_memory_ready(struct mei_cl_device *cldev)
|
||||
{
|
||||
struct mkhi_gfx_mem_ready req = {0};
|
||||
unsigned int mode = MEI_CL_IO_TX_INTERNAL;
|
||||
|
||||
req.hdr.group_id = MKHI_GROUP_ID_GFX;
|
||||
req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ;
|
||||
req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED;
|
||||
|
||||
dev_dbg(&cldev->dev, "Sending memory ready command\n");
|
||||
return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode);
|
||||
}
|
||||
|
||||
static void mei_mkhi_fix(struct mei_cl_device *cldev)
|
||||
{
|
||||
int ret;
|
||||
|
@ -264,6 +247,39 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
|
|||
dev_err(&cldev->dev, "FW version command failed %d\n", ret);
|
||||
mei_cldev_disable(cldev);
|
||||
}
|
||||
|
||||
static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* No need to enable the client if nothing is needed from it */
|
||||
if (!cldev->bus->fw_f_fw_ver_supported &&
|
||||
cldev->bus->pxp_mode != MEI_DEV_PXP_INIT)
|
||||
return;
|
||||
|
||||
ret = mei_cldev_enable(cldev);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) {
|
||||
ret = mei_gfx_memory_ready(cldev);
|
||||
if (ret < 0)
|
||||
dev_err(&cldev->dev, "memory ready command failed %d\n", ret);
|
||||
else
|
||||
dev_dbg(&cldev->dev, "memory ready command sent\n");
|
||||
/* we go to reset after that */
|
||||
cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = mei_fwver(cldev);
|
||||
if (ret < 0)
|
||||
dev_err(&cldev->dev, "FW version command failed %d\n",
|
||||
ret);
|
||||
out:
|
||||
mei_cldev_disable(cldev);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_wd - wd client on the bus, change protocol version
|
||||
* as the API has changed.
|
||||
|
@ -503,6 +519,26 @@ static void vt_support(struct mei_cl_device *cldev)
|
|||
cldev->do_match = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* pxp_is_ready - enable bus client if pxp is ready
|
||||
*
|
||||
* @cldev: me clients device
|
||||
*/
|
||||
static void pxp_is_ready(struct mei_cl_device *cldev)
|
||||
{
|
||||
struct mei_device *bus = cldev->bus;
|
||||
|
||||
switch (bus->pxp_mode) {
|
||||
case MEI_DEV_PXP_READY:
|
||||
case MEI_DEV_PXP_DEFAULT:
|
||||
cldev->do_match = 1;
|
||||
break;
|
||||
default:
|
||||
cldev->do_match = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
|
||||
|
||||
static struct mei_fixup {
|
||||
|
@ -516,10 +552,10 @@ static struct mei_fixup {
|
|||
MEI_FIXUP(MEI_UUID_WD, mei_wd),
|
||||
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
|
||||
MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
|
||||
MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver),
|
||||
MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver),
|
||||
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
|
||||
MEI_FIXUP(MEI_UUID_ANY, vt_support),
|
||||
MEI_FIXUP(MEI_UUID_PAVP, whitelist),
|
||||
MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready),
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
|
||||
* Intel Management Engine Interface (Intel MEI) Linux driver
|
||||
*/
|
||||
|
||||
|
@ -870,7 +870,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
|
|||
}
|
||||
|
||||
list_move_tail(&cb->list, &dev->ctrl_rd_list);
|
||||
cl->timer_count = MEI_CONNECT_TIMEOUT;
|
||||
cl->timer_count = dev->timeouts.connect;
|
||||
mei_schedule_stall_timer(dev);
|
||||
|
||||
return 0;
|
||||
|
@ -945,7 +945,7 @@ static int __mei_cl_disconnect(struct mei_cl *cl)
|
|||
wait_event_timeout(cl->wait,
|
||||
cl->state == MEI_FILE_DISCONNECT_REPLY ||
|
||||
cl->state == MEI_FILE_DISCONNECTED,
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
dev->timeouts.cl_connect);
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
rets = cl->status;
|
||||
|
@ -1065,7 +1065,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
|
|||
}
|
||||
|
||||
list_move_tail(&cb->list, &dev->ctrl_rd_list);
|
||||
cl->timer_count = MEI_CONNECT_TIMEOUT;
|
||||
cl->timer_count = dev->timeouts.connect;
|
||||
mei_schedule_stall_timer(dev);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1164,7 +1164,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
|
|||
cl->state == MEI_FILE_DISCONNECTED ||
|
||||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
|
||||
cl->state == MEI_FILE_DISCONNECT_REPLY),
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
dev->timeouts.cl_connect);
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (!mei_cl_is_connected(cl)) {
|
||||
|
@ -1562,7 +1562,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
|
|||
cl->notify_en == request ||
|
||||
cl->status ||
|
||||
!mei_cl_is_connected(cl),
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
dev->timeouts.cl_connect);
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (cl->notify_en != request && !cl->status)
|
||||
|
@ -2336,7 +2336,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
|
|||
mutex_unlock(&dev->device_lock);
|
||||
wait_event_timeout(cl->wait,
|
||||
cl->dma_mapped || cl->status,
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
dev->timeouts.cl_connect);
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (!cl->dma_mapped && !cl->status)
|
||||
|
@ -2415,7 +2415,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
|
|||
mutex_unlock(&dev->device_lock);
|
||||
wait_event_timeout(cl->wait,
|
||||
!cl->dma_mapped || cl->status,
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
dev->timeouts.cl_connect);
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (cl->dma_mapped && !cl->status)
|
||||
|
|
|
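The MEI_CONNECT_TIMEOUT / MEI_CL_CONNECT_TIMEOUT / MKHI_RCV_TIMEOUT constants replaced above move into per-device values, which lets a device with slow firmware (such as the graphics GSC) pick longer limits at init time. The container itself is not part of this excerpt; a sketch of the assumed layout, restricted to the fields the diff actually dereferences, with hypothetical types:

/* Sketch only: field names taken from the dev->timeouts usages above;
 * real types and units may differ.
 */
struct mei_dev_timeouts {
	int connect;			/* replaces MEI_CONNECT_TIMEOUT (timer counts) */
	unsigned long cl_connect;	/* replaces mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT) */
	unsigned long mkhi_recv;	/* replaces MKHI_RCV_TIMEOUT */
};

struct mei_device {
	/* ... */
	struct mei_dev_timeouts timeouts;
	/* ... */
};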
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2012-2016, Intel Corporation. All rights reserved
|
||||
* Copyright (c) 2012-2022, Intel Corporation. All rights reserved
|
||||
* Intel Management Engine Interface (Intel MEI) Linux driver
|
||||
*/
|
||||
|
||||
|
@ -86,6 +86,20 @@ out:
|
|||
}
|
||||
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active);
|
||||
|
||||
static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state)
|
||||
{
|
||||
#define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state
|
||||
switch (state) {
|
||||
MEI_PXP_MODE(DEFAULT);
|
||||
MEI_PXP_MODE(INIT);
|
||||
MEI_PXP_MODE(SETUP);
|
||||
MEI_PXP_MODE(READY);
|
||||
default:
|
||||
return "unknown";
|
||||
}
|
||||
#undef MEI_PXP_MODE
|
||||
}
|
||||
|
||||
static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct mei_device *dev = m->private;
|
||||
|
@ -112,6 +126,9 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "pg: %s, %s\n",
|
||||
mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
|
||||
mei_pg_state_str(mei_pg_state(dev)));
|
||||
|
||||
seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate);
|
||||
|
|
|
@ -13,6 +13,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/kthread.h>

#include "mei_dev.h"
#include "hw-me.h"

@ -31,6 +32,17 @@ static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val)
        return 0;
}

static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem)
{
        u32 low = lower_32_bits(mem->start);
        u32 hi = upper_32_bits(mem->start);
        u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID;

        iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG);
        iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG);
        iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG);
}

static int mei_gsc_probe(struct auxiliary_device *aux_dev,
                         const struct auxiliary_device_id *aux_dev_id)
{
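mei_gsc_set_ext_op_mem() splits the extended operational memory base across the LO/HI registers and encodes the region size as a count of 4 KiB pages with a valid bit in the limit register. A small sketch of that encoding, using an invented 8 MiB region purely to make the arithmetic concrete:

/* Illustrative only: how the H_GSC_EXT_OP_MEM_LIMIT_REG value is formed.
 * For an (invented) 8 MiB region: 8 MiB / 4 KiB = 0x800 pages, so
 * limit = 0x800 | BIT(31) = 0x80000800.
 */
#include <stdio.h>

#define SZ_4K                0x1000u
#define GSC_EXT_OP_MEM_VALID (1u << 31)

static unsigned int ext_op_mem_limit(unsigned long long size)
{
        return (unsigned int)(size / SZ_4K) | GSC_EXT_OP_MEM_VALID;
}

int main(void)
{
        printf("limit = 0x%08x\n", ext_op_mem_limit(8ULL << 20)); /* 0x80000800 */
        return 0;
}
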
@ -47,7 +59,7 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,

        device = &aux_dev->dev;

        dev = mei_me_dev_init(device, cfg);
        dev = mei_me_dev_init(device, cfg, adev->slow_firmware);
        if (!dev) {
                ret = -ENOMEM;
                goto err;

@ -66,13 +78,33 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,

        dev_set_drvdata(device, dev);

        ret = devm_request_threaded_irq(device, hw->irq,
                                        mei_me_irq_quick_handler,
                                        mei_me_irq_thread_handler,
                                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        if (ret) {
                dev_err(device, "irq register failed %d\n", ret);
                goto err;
        if (adev->ext_op_mem.start) {
                mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
                dev->pxp_mode = MEI_DEV_PXP_INIT;
        }

        /* use polling */
        if (mei_me_hw_use_polling(hw)) {
                mei_disable_interrupts(dev);
                mei_clear_interrupts(dev);
                init_waitqueue_head(&hw->wait_active);
                hw->is_active = true; /* start in active mode for initialization */
                hw->polling_thread = kthread_run(mei_me_polling_thread, dev,
                                                 "kmegscirqd/%s", dev_name(device));
                if (IS_ERR(hw->polling_thread)) {
                        ret = PTR_ERR(hw->polling_thread);
                        dev_err(device, "unable to create kernel thread: %d\n", ret);
                        goto err;
                }
        } else {
                ret = devm_request_threaded_irq(device, hw->irq,
                                                mei_me_irq_quick_handler,
                                                mei_me_irq_thread_handler,
                                                IRQF_ONESHOT, KBUILD_MODNAME, dev);
                if (ret) {
                        dev_err(device, "irq register failed %d\n", ret);
                        goto err;
                }
        }

        pm_runtime_get_noresume(device);

@ -98,7 +130,8 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,

register_err:
        mei_stop(dev);
        devm_free_irq(device, hw->irq, dev);
        if (!mei_me_hw_use_polling(hw))
                devm_free_irq(device, hw->irq, dev);

err:
        dev_err(device, "probe failed: %d\n", ret);

@ -119,12 +152,17 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)

        mei_stop(dev);

        hw = to_me_hw(dev);
        if (mei_me_hw_use_polling(hw))
                kthread_stop(hw->polling_thread);

        mei_deregister(dev);

        pm_runtime_disable(&aux_dev->dev);

        mei_disable_interrupts(dev);
        devm_free_irq(&aux_dev->dev, hw->irq, dev);
        if (!mei_me_hw_use_polling(hw))
                devm_free_irq(&aux_dev->dev, hw->irq, dev);
}

static int __maybe_unused mei_gsc_pm_suspend(struct device *device)

@ -144,11 +182,22 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
static int __maybe_unused mei_gsc_pm_resume(struct device *device)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct auxiliary_device *aux_dev;
        struct mei_aux_device *adev;
        int err;
        struct mei_me_hw *hw;

        if (!dev)
                return -ENODEV;

        hw = to_me_hw(dev);
        aux_dev = to_auxiliary_dev(device);
        adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
        if (adev->ext_op_mem.start) {
                mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
                dev->pxp_mode = MEI_DEV_PXP_INIT;
        }

        err = mei_restart(dev);
        if (err)
                return err;

@ -185,6 +234,9 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
        if (mei_write_is_idle(dev)) {
                hw = to_me_hw(dev);
                hw->pg_state = MEI_PG_ON;

                if (mei_me_hw_use_polling(hw))
                        hw->is_active = false;
                ret = 0;
        } else {
                ret = -EAGAIN;

@ -209,6 +261,11 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
        hw = to_me_hw(dev);
        hw->pg_state = MEI_PG_OFF;

        if (mei_me_hw_use_polling(hw)) {
                hw->is_active = true;
                wake_up(&hw->wait_active);
        }

        mutex_unlock(&dev->device_lock);

        irq_ret = mei_me_irq_thread_handler(1, dev);

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */
#include <linux/export.h>

@ -232,7 +232,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
        mutex_unlock(&dev->device_lock);
        ret = wait_event_timeout(dev->wait_hbm_start,
                                 dev->hbm_state != MEI_HBM_STARTING,
                                 mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
                                 dev->timeouts.hbm);
        mutex_lock(&dev->device_lock);

        if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {

@ -275,7 +275,7 @@ int mei_hbm_start_req(struct mei_device *dev)
        }

        dev->hbm_state = MEI_HBM_STARTING;
        dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
        dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
}

@ -316,7 +316,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
        }

        dev->hbm_state = MEI_HBM_DR_SETUP;
        dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
        dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
}

@ -351,7 +351,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
        }

        dev->hbm_state = MEI_HBM_CAP_SETUP;
        dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
        dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
}

@ -385,7 +385,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
                return ret;
        }
        dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
        dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
        dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
}

@ -751,7 +751,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
                return ret;
        }

        dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
        dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);

        return 0;

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */
#ifndef _MEI_HW_MEI_REGS_H_

@ -127,6 +127,8 @@
# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
# define GSC_CFG_HFS_5_BOOT_TYPE_MSK 0x00000003
# define GSC_CFG_HFS_5_BOOT_TYPE_PXP 3
#define PCI_CFG_HFS_6 0x6C

/* MEI registers */

@ -143,6 +145,11 @@
/* H_D0I3C - D0I3 Control */
#define H_D0I3C 0x800

#define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100
#define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104
#define H_GSC_EXT_OP_MEM_LIMIT_REG        0x108
#define GSC_EXT_OP_MEM_VALID              BIT(31)

/* register bits of H_CSR (Host Control Status register) */
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
#define H_CBD 0xFF000000

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/delay.h>

#include "mei_dev.h"
#include "hbm.h"

@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev)
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);
        u32 hcsr;

        hcsr |= H_CSR_IE_MASK;
        if (mei_me_hw_use_polling(to_me_hw(dev)))
                return;

        hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (mei_me_hw_use_polling(hw))
                return;

        synchronize_irq(hw->irq);
}

@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
        if (!mei_me_hw_use_polling(to_me_hw(dev)))
                hcsr |= H_CSR_IE_MASK;

        hcsr |= H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
}

@ -423,6 +433,29 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev)
        return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}

/**
 * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
 *
 * @dev: the device structure
 */
static void mei_gsc_pxp_check(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 fwsts5 = 0;

        if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
                return;

        hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
        trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
        if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
                dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
                dev->pxp_mode = MEI_DEV_PXP_READY;
        } else {
                dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
        }
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached

@ -435,13 +468,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                           dev->recvd_hw_ready,
                           mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
                           dev->timeouts.hw_ready);
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }

        mei_gsc_pxp_check(dev);

        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;

@ -697,7 +732,6 @@ static void mei_me_pg_unset(struct mei_device *dev)
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        dev->pg_event = MEI_PG_EVENT_WAIT;

@ -708,7 +742,8 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
                           dev->pg_event == MEI_PG_EVENT_RECEIVED,
                           dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {

@ -734,7 +769,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)

@ -746,7 +780,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
                           dev->pg_event == MEI_PG_EVENT_RECEIVED,
                           dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);

reply:

@ -762,7 +797,8 @@ reply:

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
                           dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)

@ -877,8 +913,6 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev)
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
        u32 reg;

@ -900,7 +934,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
                           dev->pg_event == MEI_PG_EVENT_RECEIVED,
                           dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {

@ -920,7 +955,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
                           dev->timeouts.d0i3);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {

@ -980,7 +1016,6 @@ on:
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        int ret;
        u32 reg;

@ -1003,7 +1038,8 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev)

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
                           dev->timeouts.d0i3);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {

@ -1176,7 +1212,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)

        hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

        if (!intr_enable)
        if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
                hcsr &= ~H_CSR_IE_MASK;

        dev->recvd_hw_ready = false;

@ -1259,7 +1295,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)

        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
                dev_warn(dev->dev, "FW not ready: resetting.\n");
                dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
                         dev->dev_state, dev->pxp_mode);
                if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
                    dev->dev_state == MEI_DEV_POWER_DOWN)
                        mei_cl_all_disconnect(dev);

@ -1331,6 +1368,66 @@ end:
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);

#define MEI_POLLING_TIMEOUT_ACTIVE 100
#define MEI_POLLING_TIMEOUT_IDLE   500

/**
 * mei_me_polling_thread - interrupt register polling thread
 *
 * The thread monitors the interrupt source register and calls
 * mei_me_irq_thread_handler() to handle the firmware input.
 *
 * The function polls with MEI_POLLING_TIMEOUT_ACTIVE timeout while
 * events are pending; in the idle case the polling time is increased
 * by MEI_POLLING_TIMEOUT_ACTIVE on each pass, up to
 * MEI_POLLING_TIMEOUT_IDLE.
 *
 * @_dev: mei device
 *
 * Return: always 0
 */
int mei_me_polling_thread(void *_dev)
{
        struct mei_device *dev = _dev;
        irqreturn_t irq_ret;
        long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;

        dev_dbg(dev->dev, "kernel thread is running\n");
        while (!kthread_should_stop()) {
                struct mei_me_hw *hw = to_me_hw(dev);
                u32 hcsr;

                wait_event_timeout(hw->wait_active,
                                   hw->is_active || kthread_should_stop(),
                                   msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));

                if (kthread_should_stop())
                        break;

                hcsr = mei_hcsr_read(dev);
                if (me_intr_src(hcsr)) {
                        polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
                        irq_ret = mei_me_irq_thread_handler(1, dev);
                        if (irq_ret != IRQ_HANDLED)
                                dev_err(dev->dev, "irq_ret %d\n", irq_ret);
                } else {
                        /*
                         * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
                         * up to MEI_POLLING_TIMEOUT_IDLE
                         */
                        polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
                                                    MEI_POLLING_TIMEOUT_ACTIVE,
                                                    MEI_POLLING_TIMEOUT_IDLE);
                }

                schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mei_me_polling_thread);

static const struct mei_hw_ops mei_me_hw_ops = {

        .trc_status = mei_me_trc_status,

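The polling thread backs off linearly while the interrupt source register stays quiet: the interval starts at MEI_POLLING_TIMEOUT_ACTIVE and grows by the same amount on each idle pass until it is clamped at MEI_POLLING_TIMEOUT_IDLE; any event resets it. A standalone sketch of just that back-off sequence (the clamp_val() helper below mirrors the kernel macro; nothing here is part of the patch):

/* Illustrative only: the idle back-off of mei_me_polling_thread().
 * Starting from the active interval, consecutive idle passes sleep
 * 200, 300, 400, 500, 500, ... ms; an event resets the interval to 100 ms.
 */
#include <stdio.h>

#define MEI_POLLING_TIMEOUT_ACTIVE 100 /* ms */
#define MEI_POLLING_TIMEOUT_IDLE   500 /* ms */

static long clamp_val(long v, long lo, long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        long timeout = MEI_POLLING_TIMEOUT_ACTIVE;
        int pass;

        for (pass = 1; pass <= 7; pass++) {
                timeout = clamp_val(timeout + MEI_POLLING_TIMEOUT_ACTIVE,
                                    MEI_POLLING_TIMEOUT_ACTIVE,
                                    MEI_POLLING_TIMEOUT_IDLE);
                printf("idle pass %d: sleep %ld ms\n", pass, timeout);
        }
        return 0;
}
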
@ -1636,11 +1733,12 @@ EXPORT_SYMBOL_GPL(mei_me_get_cfg);
 *
 * @parent: device associated with physical device (pci/platform)
 * @cfg: per device generation config
 * @slow_fw: configure longer timeouts as FW is slow
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct device *parent,
                                   const struct mei_cfg *cfg)
                                   const struct mei_cfg *cfg, bool slow_fw)
{
        struct mei_device *dev;
        struct mei_me_hw *hw;

@ -1655,7 +1753,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
        for (i = 0; i < DMA_DSCR_NUM; i++)
                dev->dr_dscr[i].size = cfg->dma_size[i];

        mei_device_init(dev, parent, &mei_me_hw_ops);
        mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
        hw->cfg = cfg;

        dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
 * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -51,6 +51,9 @@ struct mei_cfg {
 * @d0i3_supported: d0i3 support
 * @hbuf_depth: depth of hardware host/write buffer in slots
 * @read_fws: read FW status register handler
 * @polling_thread: interrupt polling thread
 * @wait_active: the polling thread activity wait queue
 * @is_active: the device is active
 */
struct mei_me_hw {
        const struct mei_cfg *cfg;

@ -60,10 +63,19 @@ struct mei_me_hw {
        bool d0i3_supported;
        u8 hbuf_depth;
        int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
        /* polling */
        struct task_struct *polling_thread;
        wait_queue_head_t wait_active;
        bool is_active;
};

#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)

static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw)
{
        return hw->irq < 0;
}

/**
 * enum mei_cfg_idx - indices to platform specific configurations.
 *

@ -120,12 +132,13 @@ enum mei_cfg_idx {
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);

struct mei_device *mei_me_dev_init(struct device *parent,
                                   const struct mei_cfg *cfg);
                                   const struct mei_cfg *cfg, bool slow_fw);

int mei_me_pg_enter_sync(struct mei_device *dev);
int mei_me_pg_exit_sync(struct mei_device *dev);

irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
int mei_me_polling_thread(void *_dev);

#endif /* _MEI_INTERFACE_H_ */

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
 * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -1201,7 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
        if (!dev)
                return NULL;

        mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);
        mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);

        hw = to_txe_hw(dev);

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -16,11 +16,16 @@
#define MEI_CONNECT_TIMEOUT          3  /* HPS: at least 2 seconds */

#define MEI_CL_CONNECT_TIMEOUT       15 /* HPS: Client Connect Timeout */
#define MEI_CL_CONNECT_TIMEOUT_SLOW  30 /* HPS: Client Connect Timeout, slow FW */
#define MEI_CLIENTS_INIT_TIMEOUT     15 /* HPS: Clients Enumeration Timeout */

#define MEI_PGI_TIMEOUT              1  /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT             5  /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT              1  /* 1 second */
#define MEI_HBM_TIMEOUT_SLOW         5  /* 5 second, slow FW */

#define MKHI_RCV_TIMEOUT             500   /* receive timeout in msec */
#define MKHI_RCV_TIMEOUT_SLOW        10000 /* receive timeout in msec, slow FW */

/*
 * FW page size for DMA allocations

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
 * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -218,16 +218,6 @@ int mei_start(struct mei_device *dev)
                goto err;
        }

        if (!mei_host_is_ready(dev)) {
                dev_err(dev->dev, "host is not ready.\n");
                goto err;
        }

        if (!mei_hw_is_ready(dev)) {
                dev_err(dev->dev, "ME is not ready.\n");
                goto err;
        }

        if (!mei_hbm_version_is_supported(dev)) {
                dev_dbg(dev->dev, "MEI start failed.\n");
                goto err;

@ -320,6 +310,8 @@ void mei_stop(struct mei_device *dev)

        mei_clear_interrupts(dev);
        mei_synchronize_irq(dev);
        /* to catch HW-initiated reset */
        mei_cancel_work(dev);

        mutex_lock(&dev->device_lock);

@ -357,14 +349,16 @@ bool mei_write_is_idle(struct mei_device *dev)
EXPORT_SYMBOL_GPL(mei_write_is_idle);

/**
 * mei_device_init -- initialize mei_device structure
 * mei_device_init - initialize mei_device structure
 *
 * @dev: the mei device
 * @device: the device structure
 * @slow_fw: configure longer timeouts as FW is slow
 * @hw_ops: hw operations
 */
void mei_device_init(struct mei_device *dev,
                     struct device *device,
                     bool slow_fw,
                     const struct mei_hw_ops *hw_ops)
{
        /* setup our list array */

@ -393,6 +387,8 @@ void mei_device_init(struct mei_device *dev,
        bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
        dev->open_handle_count = 0;

        dev->pxp_mode = MEI_DEV_PXP_DEFAULT;

        /*
         * Reserving the first client ID
         * 0: Reserved for MEI Bus Message communications

@ -402,6 +398,21 @@ void mei_device_init(struct mei_device *dev,
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev->ops = hw_ops;
        dev->dev = device;

        dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
        dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
        dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT;
        dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        if (slow_fw) {
                dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW);
                dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW);
                dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW);
        } else {
                dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
                dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
                dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
        }
}
EXPORT_SYMBOL_GPL(mei_device_init);

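mei_device_init() now derives all waiter timeouts from a single slow_fw flag instead of the compile-time constants that the earlier hunks removed. A hedged sketch that prints the two resulting profiles using the hw.h constants above (illustrative only; in the driver most of the values are converted to jiffies):

/* Illustrative only: the regular vs. slow-firmware timeout profiles
 * selected by the slow_fw flag in mei_device_init().
 */
#include <stdbool.h>
#include <stdio.h>

#define MEI_CL_CONNECT_TIMEOUT      15    /* s  */
#define MEI_CL_CONNECT_TIMEOUT_SLOW 30    /* s  */
#define MEI_HBM_TIMEOUT             1     /* s  */
#define MEI_HBM_TIMEOUT_SLOW        5     /* s  */
#define MKHI_RCV_TIMEOUT            500   /* ms */
#define MKHI_RCV_TIMEOUT_SLOW       10000 /* ms */

static void print_profile(bool slow_fw)
{
        printf("%s FW: cl_connect %d s, hbm %d s, mkhi_recv %d ms\n",
               slow_fw ? "slow" : "regular",
               slow_fw ? MEI_CL_CONNECT_TIMEOUT_SLOW : MEI_CL_CONNECT_TIMEOUT,
               slow_fw ? MEI_HBM_TIMEOUT_SLOW : MEI_HBM_TIMEOUT,
               slow_fw ? MKHI_RCV_TIMEOUT_SLOW : MKHI_RCV_TIMEOUT);
}

int main(void)
{
        print_profile(false);
        print_profile(true);
        return 0;
}
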
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -571,7 +571,7 @@ static int mei_ioctl_connect_vtag(struct file *file,
                           cl->state == MEI_FILE_DISCONNECTED ||
                           cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
                           cl->state == MEI_FILE_DISCONNECT_REPLY),
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
                           dev->timeouts.cl_connect);
        mutex_lock(&dev->device_lock);
}

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -62,6 +62,21 @@ enum mei_dev_state {
        MEI_DEV_POWER_UP
};

/**
 * enum mei_dev_pxp_mode - MEI PXP mode state
 *
 * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required
 * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware
 * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response
 * @MEI_DEV_PXP_READY: device initialized
 */
enum mei_dev_pxp_mode {
        MEI_DEV_PXP_DEFAULT = 0,
        MEI_DEV_PXP_INIT    = 1,
        MEI_DEV_PXP_SETUP   = 2,
        MEI_DEV_PXP_READY   = 3,
};

const char *mei_dev_state_str(int state);

enum mei_file_transaction_states {
@ -415,6 +430,17 @@ struct mei_fw_version {

#define MEI_MAX_FW_VER_BLOCKS 3

struct mei_dev_timeouts {
        unsigned long hw_ready;   /* Timeout on ready message, in jiffies */
        int connect;              /* HPS: at least 2 seconds, in seconds */
        unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */
        int client_init;          /* HPS: Clients Enumeration Timeout, in seconds */
        unsigned long pgi;        /* PG Isolation time response, in jiffies */
        unsigned int d0i3;        /* D0i3 set/unset max response time, in jiffies */
        unsigned long hbm;        /* HBM operation timeout, in jiffies */
        unsigned long mkhi_recv;  /* receive timeout, in jiffies */
};

/**
 * struct mei_device - MEI private device struct
 *

@ -443,6 +469,7 @@ struct mei_fw_version {
 * @reset_count : number of consecutive resets
 * @dev_state : device state
 * @hbm_state : state of host bus message protocol
 * @pxp_mode : PXP device mode
 * @init_clients_timer : HBM init handshake timeout
 *
 * @pg_event : power gating event

@ -480,6 +507,8 @@ struct mei_fw_version {
 * @allow_fixed_address: allow user space to connect a fixed client
 * @override_fixed_address: force allow fixed address behavior
 *
 * @timeouts: actual timeout values
 *
 * @reset_work : work item for the device reset
 * @bus_rescan_work : work item for the bus rescan
 *

@ -524,6 +553,7 @@ struct mei_device {
        unsigned long reset_count;
        enum mei_dev_state dev_state;
        enum mei_hbm_state hbm_state;
        enum mei_dev_pxp_mode pxp_mode;
        u16 init_clients_timer;

        /*

@ -568,6 +598,8 @@ struct mei_device {
        bool allow_fixed_address;
        bool override_fixed_address;

        struct mei_dev_timeouts timeouts;

        struct work_struct reset_work;
        struct work_struct bus_rescan_work;

@ -632,6 +664,7 @@ static inline u32 mei_slots2data(int slots)
 */
void mei_device_init(struct mei_device *dev,
                     struct device *device,
                     bool slow_fw,
                     const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);

@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#ifndef _MEI_MKHI_H_
#define _MEI_MKHI_H_

#include <linux/types.h>

#define MKHI_FEATURE_PTT 0x10

#define MKHI_FWCAPS_GROUP_ID 0x3
#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
#define MKHI_GEN_GROUP_ID 0xFF
#define MKHI_GEN_GET_FW_VERSION_CMD 0x2

#define MKHI_GROUP_ID_GFX             0x30
#define MKHI_GFX_RESET_WARN_CMD_REQ   0x0
#define MKHI_GFX_MEMORY_READY_CMD_REQ 0x1

/* Allow transition to PXP mode without approval */
#define MKHI_GFX_MEM_READY_PXP_ALLOWED 0x1

struct mkhi_rule_id {
        __le16 rule_type;
        u8 feature_id;
        u8 reserved;
} __packed;

struct mkhi_fwcaps {
        struct mkhi_rule_id id;
        u8 len;
        u8 data[];
} __packed;

struct mkhi_msg_hdr {
        u8 group_id;
        u8 command;
        u8 reserved;
        u8 result;
} __packed;

struct mkhi_msg {
        struct mkhi_msg_hdr hdr;
        u8 data[];
} __packed;

struct mkhi_gfx_mem_ready {
        struct mkhi_msg_hdr hdr;
        u32 flags;
} __packed;

#endif /* _MEI_MKHI_H_ */

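The new mkhi.h collects the MKHI framing used by the GSC pieces in this series. As a hedged sketch of how the definitions compose, the snippet below fills in a gfx "memory ready" request; the mei_cldev_send() call and the client-device handle are assumptions about a consumer and are not part of this header:

/* Illustrative only: composing a gfx "memory ready" MKHI request from the
 * structures above. The send path (mei_cldev_send()) and the client-device
 * handle are assumed consumer-side details, not part of mkhi.h.
 */
#include <linux/mei_cl_bus.h>
#include "mkhi.h"

static int send_gfx_memory_ready(struct mei_cl_device *cldev)
{
        struct mkhi_gfx_mem_ready req = {
                .hdr.group_id = MKHI_GROUP_ID_GFX,
                .hdr.command  = MKHI_GFX_MEMORY_READY_CMD_REQ,
                .flags        = MKHI_GFX_MEM_READY_PXP_ALLOWED,
        };
        ssize_t ret;

        ret = mei_cldev_send(cldev, (u8 *)&req, sizeof(req));

        return ret < 0 ? (int)ret : 0;
}
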
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

@ -203,7 +203,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_me_dev_init(&pdev->dev, cfg);
        dev = mei_me_dev_init(&pdev->dev, cfg, false);
        if (!dev) {
                err = -ENOMEM;
                goto end;

@ -7,10 +7,22 @@

#include <linux/auxiliary_bus.h>

/**
 * struct mei_aux_device - mei auxiliary device
 * @aux_dev: auxiliary device object
 * @irq: interrupt driving the mei auxiliary device
 * @bar: mmio resource bar reserved to mei auxiliary device
 * @ext_op_mem: resource for extended operational memory
 *              used in graphics PXP mode
 * @slow_firmware: the device has slow underlying firmware;
 *                 such firmware requires larger operation timeouts
 */
struct mei_aux_device {
        struct auxiliary_device aux_dev;
        int irq;
        struct resource bar;
        struct resource ext_op_mem;
        bool slow_firmware;
};

#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
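struct mei_aux_device is the handle a parent driver (i915 in this series) passes to the MEI GSC driver over the auxiliary bus, and the new fields tie the earlier hunks together: ext_op_mem feeds mei_gsc_set_ext_op_mem(), slow_firmware selects the long timeout profile in mei_device_init(), and a negative irq is what mei_me_hw_use_polling() keys off (the probe copies it into struct mei_me_hw; that wiring is outside this diff). A hedged sketch of populating the structure; every address, size and the helper name are invented, and the auxiliary-device registration boilerplate is omitted:

/* Illustrative only: a parent driver populating struct mei_aux_device
 * before registering it on the auxiliary bus. All addresses and sizes
 * are invented; auxiliary_device_init()/add() boilerplate is omitted.
 */
#include <linux/mei_aux.h>
#include <linux/ioport.h>
#include <linux/sizes.h>

static void mei_adev_fill_example(struct mei_aux_device *adev)
{
        adev->irq = -1;                 /* no wired interrupt: MEI falls back to polling */

        adev->bar.start = 0xdf000000;   /* GSC MMIO window base (invented) */
        adev->bar.end   = adev->bar.start + SZ_1M - 1;
        adev->bar.flags = IORESOURCE_MEM;

        adev->ext_op_mem.start = 0x80000000; /* extended operational memory (invented) */
        adev->ext_op_mem.end   = adev->ext_op_mem.start + SZ_8M - 1;
        adev->ext_op_mem.flags = IORESOURCE_MEM;

        adev->slow_firmware = true;     /* select the long timeout profile */
}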