Merge tag 'drm-intel-next-fixes-2019-11-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
- Includes gvt-next-fixes-2019-11-12
- Fix Bugzilla #112051: Fix detection for a CMP-V PCH
- Fix Bugzilla #112256: Corrupted page table at address on plymouth splash
- Fix Bugzilla #111594: Avoid losing RC6 when HuC authentication is used
- Fix for OA/perf metric coherency, restore GT coarse power gating workaround
- Avoid atomic context on error capture
- Avoid MST bitmask overflowing to EDP/DPI input select
- Fixes to CI found dmesg splats

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191120204035.GA14908@jlahtine-desk.ger.corp.intel.com
commit 30c185da76
@@ -234,6 +234,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
         info->apertures->ranges[0].base = ggtt->gmadr.start;
         info->apertures->ranges[0].size = ggtt->mappable_end;
 
+        /* Our framebuffer is the entirety of fbdev's system memory */
+        info->fix.smem_start =
+                (unsigned long)(ggtt->gmadr.start + vma->node.start);
+        info->fix.smem_len = vma->node.size;
+
         vaddr = i915_vma_pin_iomap(vma);
         if (IS_ERR(vaddr)) {
                 DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -243,10 +248,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
         info->screen_base = vaddr;
         info->screen_size = vma->node.size;
 
-        /* Our framebuffer is the entirety of fbdev's system memory */
-        info->fix.smem_start = (unsigned long)info->screen_base;
-        info->fix.smem_len = info->screen_size;
-
         drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
 
         /* If the object is shmemfs backed, it will have given us zeroed pages.
@@ -2885,7 +2885,7 @@ struct intel_plane *
 skl_universal_plane_create(struct drm_i915_private *dev_priv,
                            enum pipe pipe, enum plane_id plane_id)
 {
-        static const struct drm_plane_funcs *plane_funcs;
+        const struct drm_plane_funcs *plane_funcs;
         struct intel_plane *plane;
         enum drm_plane_type plane_type;
         unsigned int supported_rotations;
@@ -727,6 +727,7 @@ int i915_gem_init_contexts(struct drm_i915_private *i915)
 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 {
         destroy_kernel_context(&i915->kernel_context);
+        flush_work(&i915->gem.contexts.free_work);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
         struct intel_gt_timelines *timelines = &gt->timelines;
         struct intel_timeline *tl, *tn;
-        unsigned long active_count = 0;
         unsigned long flags;
         bool interruptible;
         LIST_HEAD(free);
@@ -46,10 +45,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
         spin_lock_irqsave(&timelines->lock, flags);
         list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-                if (!mutex_trylock(&tl->mutex)) {
-                        active_count++; /* report busy to caller, try again? */
+                if (!mutex_trylock(&tl->mutex))
                         continue;
-                }
 
                 intel_timeline_get(tl);
                 GEM_BUG_ON(!tl->active_count);
@@ -74,9 +71,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
                 /* Resume iteration after dropping lock */
                 list_safe_reset_next(tl, tn, link);
-                if (--tl->active_count)
-                        active_count += !!rcu_access_pointer(tl->last_request.fence);
-                else
+                if (!--tl->active_count)
                         list_del(&tl->link);
 
                 mutex_unlock(&tl->mutex);
@@ -92,7 +87,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
         list_for_each_entry_safe(tl, tn, &free, link)
                 __intel_timeline_free(&tl->kref);
 
-        return active_count ? timeout : 0;
+        return list_empty(&timelines->active_list) ? 0 : timeout;
 }
 
 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
@@ -178,8 +178,13 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
             GEN6_RC_CTL_RC6_ENABLE |
             rc6_mode);
 
-        set(uncore, GEN9_PG_ENABLE,
-            GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+        /*
+         * WaRsDisableCoarsePowerGating:skl,cnl
+         *   - Render/Media PG need to be disabled with RC6.
+         */
+        if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
+                set(uncore, GEN9_PG_ENABLE,
+                    GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
 }
 
 static void gen8_rc6_enable(struct intel_rc6 *rc6)
@@ -553,6 +553,13 @@ int intel_guc_suspend(struct intel_guc *guc)
                 GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
         };
 
+        /*
+         * If GuC communication is enabled but submission is not supported,
+         * we do not need to suspend the GuC.
+         */
+        if (!intel_guc_submission_is_enabled(guc))
+                return 0;
+
         /*
          * The ENTER_S_STATE action queues the save/restore operation in GuC FW
          * and then returns, so waiting on the H2G is not enough to guarantee
@@ -610,6 +617,14 @@ int intel_guc_resume(struct intel_guc *guc)
                 GUC_POWER_D0,
         };
 
+        /*
+         * If GuC communication is enabled but submission is not supported,
+         * we do not need to resume the GuC but we do need to enable the
+         * GuC communication on resume (above).
+         */
+        if (!intel_guc_submission_is_enabled(guc))
+                return 0;
+
         return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
@@ -3420,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
         }
 
         for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+                /* pvinfo data doesn't come from hw mmio */
+                if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+                        continue;
+
                 for (j = 0; j < block->size; j += 4) {
                         ret = handler(gvt,
                                       i915_mmio_reg_offset(block->offset) + j,
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
 
 static void debug_active_activate(struct i915_active *ref)
 {
-        lockdep_assert_held(&ref->mutex);
+        spin_lock_irq(&ref->tree_lock);
         if (!atomic_read(&ref->count)) /* before the first inc */
                 debug_object_activate(ref, &active_debug_desc);
+        spin_unlock_irq(&ref->tree_lock);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
 {
-        lockdep_assert_held(&ref->mutex);
+        lockdep_assert_held(&ref->tree_lock);
         if (!atomic_read(&ref->count)) /* after the last dec */
                 debug_object_deactivate(ref, &active_debug_desc);
 }
@@ -128,29 +129,22 @@ __active_retire(struct i915_active *ref)
 {
         struct active_node *it, *n;
         struct rb_root root;
-        bool retire = false;
+        unsigned long flags;
 
-        lockdep_assert_held(&ref->mutex);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
         /* return the unused nodes to our slabcache -- flushing the allocator */
-        if (atomic_dec_and_test(&ref->count)) {
-                debug_active_deactivate(ref);
-                root = ref->tree;
-                ref->tree = RB_ROOT;
-                ref->cache = NULL;
-                retire = true;
-        }
-
-        mutex_unlock(&ref->mutex);
-        if (!retire)
+        if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                 return;
 
         GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
-        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
-                GEM_BUG_ON(i915_active_fence_isset(&it->base));
-                kmem_cache_free(global.slab_cache, it);
-        }
+        debug_active_deactivate(ref);
+
+        root = ref->tree;
+        ref->tree = RB_ROOT;
+        ref->cache = NULL;
+
+        spin_unlock_irqrestore(&ref->tree_lock, flags);
 
         /* After the final retire, the entire struct may be freed */
         if (ref->retire)
@@ -158,6 +152,11 @@ __active_retire(struct i915_active *ref)
 
         /* ... except if you wait on it, you must manage your own references! */
         wake_up_var(ref);
+
+        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+                GEM_BUG_ON(i915_active_fence_isset(&it->base));
+                kmem_cache_free(global.slab_cache, it);
+        }
 }
 
 static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
         if (atomic_add_unless(&ref->count, -1, 1))
                 return;
 
-        mutex_lock(&ref->mutex);
         __active_retire(ref);
 }
 
@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
         if (atomic_add_unless(&ref->count, -1, 1))
                 return;
 
-        /* If we are inside interrupt context (fence signaling), defer */
-        if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
-            !mutex_trylock(&ref->mutex)) {
+        if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
                 queue_work(system_unbound_wq, &ref->work);
                 return;
         }
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
         if (!prealloc)
                 return NULL;
 
-        mutex_lock(&ref->mutex);
+        spin_lock_irq(&ref->tree_lock);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
         parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 
 out:
         ref->cache = node;
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irq(&ref->tree_lock);
 
         BUILD_BUG_ON(offsetof(typeof(*node), base));
         return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
         if (bits & I915_ACTIVE_MAY_SLEEP)
                 ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 
+        spin_lock_init(&ref->tree_lock);
         ref->tree = RB_ROOT;
         ref->cache = NULL;
+
         init_llist_head(&ref->preallocated_barriers);
         atomic_set(&ref->count, 0);
         __mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
         if (RB_EMPTY_ROOT(&ref->tree))
                 return NULL;
 
-        mutex_lock(&ref->mutex);
+        spin_lock_irq(&ref->tree_lock);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
         /*
@@ -575,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
                         goto match;
         }
 
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irq(&ref->tree_lock);
 
         return NULL;
 
@@ -583,7 +581,7 @@ match:
         rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
         if (p == &ref->cache->node)
                 ref->cache = NULL;
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irq(&ref->tree_lock);
 
         return rb_entry(p, struct active_node, node);
 }
@@ -664,6 +662,7 @@ unwind:
 void i915_active_acquire_barrier(struct i915_active *ref)
 {
         struct llist_node *pos, *next;
+        unsigned long flags;
 
         GEM_BUG_ON(i915_active_is_idle(ref));
 
@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
          * populated by i915_request_add_active_barriers() to point to the
          * request that will eventually release them.
          */
-        mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+        spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
         llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
                 struct active_node *node = barrier_from_ll(pos);
                 struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
                 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
                 intel_engine_pm_put(engine);
         }
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
@@ -48,6 +48,7 @@ struct i915_active {
         atomic_t count;
         struct mutex mutex;
 
+        spinlock_t tree_lock;
         struct active_node *cache;
         struct rb_root tree;
 
@@ -2045,4 +2045,10 @@ i915_coherent_map_type(struct drm_i915_private *i915)
         return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
 }
 
+static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
+{
+        return intel_guc_is_submission_supported(guc) &&
+               intel_guc_is_running(guc);
+}
+
 #endif
@@ -1029,9 +1029,9 @@ i915_error_object_create(struct drm_i915_private *i915,
                 for_each_sgt_daddr(dma, iter, vma->pages) {
                         void __iomem *s;
 
-                        s = io_mapping_map_atomic_wc(&mem->iomap, dma);
+                        s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
                         ret = compress_page(compress, (void __force *)s, dst);
-                        io_mapping_unmap_atomic(s);
+                        io_mapping_unmap(s);
                         if (ret)
                                 break;
                 }
@@ -1043,9 +1043,9 @@ i915_error_object_create(struct drm_i915_private *i915,
 
                         drm_clflush_pages(&page, 1);
 
-                        s = kmap_atomic(page);
+                        s = kmap(page);
                         ret = compress_page(compress, s, dst);
-                        kunmap_atomic(s);
+                        kunmap(s);
 
                         drm_clflush_pages(&page, 1);
 
@@ -1870,7 +1870,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
         config_length += num_lri_dwords(oa_config->mux_regs_len);
         config_length += num_lri_dwords(oa_config->b_counter_regs_len);
         config_length += num_lri_dwords(oa_config->flex_regs_len);
-        config_length++; /* MI_BATCH_BUFFER_END */
+        config_length += 3; /* MI_BATCH_BUFFER_START */
         config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
 
         obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
@@ -1895,7 +1895,12 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
                                         oa_config->flex_regs,
                                         oa_config->flex_regs_len);
 
-        *cs++ = MI_BATCH_BUFFER_END;
+        /* Jump into the active wait. */
+        *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+                 MI_BATCH_BUFFER_START :
+                 MI_BATCH_BUFFER_START_GEN8);
+        *cs++ = i915_ggtt_offset(stream->noa_wait);
+        *cs++ = 0;
 
         i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
@@ -9671,7 +9671,7 @@ enum skl_power_gate {
 #define  TRANS_DDI_EDP_INPUT_A_ONOFF           (4 << 12)
 #define  TRANS_DDI_EDP_INPUT_B_ONOFF           (5 << 12)
 #define  TRANS_DDI_EDP_INPUT_C_ONOFF           (6 << 12)
-#define  TRANS_DDI_MST_TRANSPORT_SELECT_MASK   REG_GENMASK(12, 10)
+#define  TRANS_DDI_MST_TRANSPORT_SELECT_MASK   REG_GENMASK(11, 10)
 #define  TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
         REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
 #define  TRANS_DDI_HDCP_SIGNALLING             (1 << 9)
@@ -62,7 +62,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                 /* KBP is SPT compatible */
                 return PCH_SPT;
         case INTEL_PCH_CNP_DEVICE_ID_TYPE:
-        case INTEL_PCH_CNP2_DEVICE_ID_TYPE:
                 DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
                 WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
                 return PCH_CNP;
@@ -76,6 +75,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                 WARN_ON(!IS_COFFEELAKE(dev_priv));
                 /* CometPoint is CNP Compatible */
                 return PCH_CNP;
+        case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+                DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+                WARN_ON(!IS_COFFEELAKE(dev_priv));
+                /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+                return PCH_SPT;
         case INTEL_PCH_ICP_DEVICE_ID_TYPE:
                 DRM_DEBUG_KMS("Found Ice Lake PCH\n");
                 WARN_ON(!IS_ICELAKE(dev_priv));
@@ -40,10 +40,10 @@ enum intel_pch {
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE         0x9D00
 #define INTEL_PCH_KBP_DEVICE_ID_TYPE            0xA280
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE            0xA300
-#define INTEL_PCH_CNP2_DEVICE_ID_TYPE           0xA380
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE         0x9D80
 #define INTEL_PCH_CMP_DEVICE_ID_TYPE            0x0280
 #define INTEL_PCH_CMP2_DEVICE_ID_TYPE           0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE          0xA380
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE            0x3480
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE            0x4B00
 #define INTEL_PCH_TGP_DEVICE_ID_TYPE            0xA080