drm/i915: fully apply WaSkipStolenMemoryFirstPage
Don't even tell the mm allocator to handle the first page of stolen on
the affected platforms. This means that we won't inherit the FB in case
the BIOS decides to put it at the start of stolen. But the BIOS should
not be putting it at the start of stolen since it's going to get
corrupted. I suppose the bug here is that some pixels at the very top
of the screen will be corrupted, so it's not exactly easy to notice.

We have confirmation that the first page of stolen does actually get
corrupted, so I really think we should do this in order to avoid any
possible future headaches, even if that means losing BIOS framebuffer
inheritance. Let's not use the HW in a way it's not supposed to be
used.

Notice that now ggtt->stolen_usable_size won't reflect the ending
address of the stolen usable range anymore, so we have to fix the
places that rely on this. To simplify, we'll just use U64_MAX.

v2: don't even put the first page on the mm (Chris)
v3: drm_mm_init() takes size instead of end as argument (Ville)
v4: add a comment explaining the reserved ranges (Chris)
    use 0 for start and U64_MAX for end when possible (Chris)

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=94605
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1481808235-27607-1-git-send-email-paulo.r.zanoni@intel.com
commit 3c6b29b2df (parent d435376104)
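To make the "stolen_usable_size is no longer an end offset" point concrete, here is a minimal standalone sketch with made-up example sizes (64 MiB of stolen, 8 MiB reserved at the top, gen8+ first-page skip). None of these numbers come from the patch or from real hardware; this is illustration only, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stolen_size    = 64ull << 20; /* assumed total stolen size   */
	uint64_t reserved_total = 8ull << 20;  /* assumed reserved top range  */
	uint64_t usable_start   = 4096;        /* first page skipped on gen8+ */

	uint64_t usable_size = stolen_size - reserved_total - usable_start;
	uint64_t usable_end  = usable_start + usable_size;

	/* usable_size != usable_end, so treating stolen_usable_size as an
	 * end offset (as the old code effectively did) would now be off by
	 * one page. */
	printf("start=0x%llx size=0x%llx end=0x%llx\n",
	       (unsigned long long)usable_start,
	       (unsigned long long)usable_size,
	       (unsigned long long)usable_end);
	return 0;
}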
@@ -315,8 +315,16 @@ struct i915_ggtt {
 	struct i915_address_space base;
 	struct io_mapping mappable;	/* Mapping to our CPU mappable region */
 
+	/* Stolen memory is segmented in hardware with different portions
+	 * offlimits to certain functions.
+	 *
+	 * The drm_mm is initialised to the total accessible range, as found
+	 * from the PCI config. On Broadwell+, this is further restricted to
+	 * avoid the first page! The upper end of stolen memory is reserved for
+	 * hardware functions and similarly removed from the accessible range.
+	 */
 	size_t stolen_size;		/* Total size of stolen memory */
-	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
+	size_t stolen_usable_size;	/* Total size minus reserved ranges */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
@@ -54,12 +54,6 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return -ENODEV;
 
-	/* See the comment at the drm_mm_init() call for more about this check.
-	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
-	 */
-	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
-		start = 4096;
-
 	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
 					  alignment, start, end,
@@ -73,11 +67,8 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 				struct drm_mm_node *node, u64 size,
 				unsigned alignment)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
 	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
-						    alignment, 0,
-						    ggtt->stolen_usable_size);
+						    alignment, 0, U64_MAX);
 }
 
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
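Why is U64_MAX a safe end bound here? The range allocator only hands out space inside the region it was initialised with, so an over-large end simply degenerates to "no extra upper limit". The following is a toy model of that clamping, purely illustrative and not the real drm_mm search code:

#include <stdint.h>

/* Toy stand-in for a range manager: it owns [start, start + size). */
struct toy_mm {
	uint64_t start;
	uint64_t size;
};

/* Intersect a caller's [start, end) window with the managed range.
 * Returns 0 and the effective window, or -1 if the window is empty. */
static int toy_window(const struct toy_mm *mm, uint64_t start, uint64_t end,
		      uint64_t *win_start, uint64_t *win_end)
{
	uint64_t mm_end = mm->start + mm->size;

	*win_start = start > mm->start ? start : mm->start;
	*win_end   = end < mm_end ? end : mm_end;

	return (*win_start < *win_end) ? 0 : -1;
}

With end == UINT64_MAX the effective window is exactly the managed range, which is why the caller above no longer needs ggtt->stolen_usable_size as an explicit upper bound.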
@@ -410,7 +401,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned long reserved_total, reserved_base = 0, reserved_size;
-	unsigned long stolen_top;
+	unsigned long stolen_usable_start, stolen_top;
 
 	mutex_init(&dev_priv->mm.stolen_lock);
 
@@ -488,20 +479,17 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 		      ggtt->stolen_size >> 10,
 		      (ggtt->stolen_size - reserved_total) >> 10);
 
-	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
+	stolen_usable_start = 0;
+	/* WaSkipStolenMemoryFirstPage:bdw+ */
+	if (INTEL_GEN(dev_priv) >= 8)
+		stolen_usable_start = 4096;
 
-	/*
-	 * Basic memrange allocator for stolen space.
-	 *
-	 * TODO: Notice that some platforms require us to not use the first page
-	 * of the stolen memory but their BIOSes may still put the framebuffer
-	 * on the first page. So we don't reserve this page for now because of
-	 * that. Our current solution is to just prevent new nodes from being
-	 * inserted on the first page - see the check we have at
-	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
-	 * problem later.
-	 */
-	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
+	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total -
+				   stolen_usable_start;
+
+	/* Basic memrange allocator for stolen space. */
+	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
+		    ggtt->stolen_usable_size);
 
 	return 0;
 }
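Reusing the toy_mm helpers from the sketch above (still purely illustrative, same assumed sizes): once the allocator is initialised like the new drm_mm_init() call, i.e. starting at 4096 on gen8+, a request confined to the first page yields an empty window. That is why the per-call "bump start to 4096" fixup removed earlier is no longer needed:

/* Usage of the toy_window() sketch above, with assumed example sizes. */
static void toy_init_stolen_example(void)
{
	struct toy_mm stolen = {
		.start = 4096,                                /* first page skipped  */
		.size  = (64ull << 20) - (8ull << 20) - 4096, /* assumed usable size */
	};
	uint64_t ws, we;

	/* A request confined to the first page finds an empty window... */
	if (toy_window(&stolen, 0, 4096, &ws, &we) < 0)
		; /* nothing below 4096 can ever be handed out */

	/* ...while end = UINT64_MAX clamps to exactly [4096, 4096 + size). */
	toy_window(&stolen, 0, UINT64_MAX, &ws, &we);
}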
@@ -538,7 +538,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		end = ggtt->stolen_size - 8 * 1024 * 1024;
 	else
-		end = ggtt->stolen_usable_size;
+		end = U64_MAX;
 
 	/* HACK: This code depends on what we will do in *_enable_fbc. If that
 	 * code changes, this code needs to change as well.