Merge tag 'drm-intel-next-fixes-2021-02-18' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Restrict DRM_I915_DEBUG to developer builds (Chris)
- Fix return and error codes (Dan)
- Suspend/Resume fix (Chris)
- Disable atomics in L3 for gen9 (Chris)
- Flush before changing register state (Chris)
- Fix for GLK's HDMI (Ville)
- Fix ILK+'s plane strides with Xtiling (Ville)
- Correct surface base address for renderclear (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YC7uQY1kt6w0tRp+@intel.com
commit f730f39eb9
@@ -19,6 +19,8 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
+	depends on EXPERT # only for developers
+	depends on !COMPILE_TEST # never built by robots
 	select DEBUG_FS
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
@@ -255,6 +255,33 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
 	else
 		offset = 0;
 
+	/*
+	 * When using an X-tiled surface the plane starts to
+	 * misbehave if the x offset + width exceeds the stride.
+	 * hsw/bdw: underrun galore
+	 * ilk/snb/ivb: wrap to the next tile row mid scanout
+	 * i965/g4x: so far appear immune to this
+	 * vlv/chv: TODO check
+	 *
+	 * Linear surfaces seem to work just fine, even on hsw/bdw
+	 * despite them not using the linear offset anymore.
+	 */
+	if (INTEL_GEN(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+		u32 alignment = intel_surf_alignment(fb, 0);
+		int cpp = fb->format->cpp[0];
+
+		while ((src_x + src_w) * cpp > plane_state->color_plane[0].stride) {
+			if (offset == 0) {
+				drm_dbg_kms(&dev_priv->drm,
+					    "Unable to find suitable display surface offset due to X-tiling\n");
+				return -EINVAL;
+			}
+
+			offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0,
+								   offset, offset - alignment);
+		}
+	}
+
 	/*
 	 * Put the final coordinates back so that the src
 	 * coordinate checks will see the right values.
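The constraint enforced above is plain byte arithmetic: with an X-tiled surface, the bytes one scanline touches, measured from the row start, must stay within a single stride, i.e. (src_x + src_w) * cpp must not exceed the stride. A minimal standalone sketch of just that check, with a hypothetical helper name and example values (not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: does the visible x range of an X-tiled plane stay
 * within the stride?  Mirrors the (src_x + src_w) * cpp > stride test above. */
static bool xtiled_row_fits(unsigned int src_x, unsigned int src_w,
			    unsigned int cpp, unsigned int stride)
{
	return (src_x + src_w) * cpp <= stride;
}

int main(void)
{
	/* 64 + 1920 pixels at 4 bytes/px = 7936 bytes: fits an 8192-byte stride */
	printf("%d\n", xtiled_row_fits(64, 1920, 4, 8192));
	/* 256 + 1920 pixels at 4 bytes/px = 8704 bytes: does not fit; the driver
	 * would walk the base offset back by one alignment unit or return -EINVAL */
	printf("%d\n", xtiled_row_fits(256, 1920, 4, 8192));
	return 0;
}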
@@ -1322,8 +1322,8 @@ static bool has_async_flips(struct drm_i915_private *i915)
 	return INTEL_GEN(i915) >= 5;
 }
 
-static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
-					 int color_plane)
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+				  int color_plane)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 
@@ -1590,10 +1590,10 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
  * Adjust the tile offset by moving the difference into
  * the x/y offsets.
  */
-static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
-					     const struct intel_plane_state *state,
-					     int color_plane,
-					     u32 old_offset, u32 new_offset)
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+				      const struct intel_plane_state *state,
+				      int color_plane,
+				      u32 old_offset, u32 new_offset)
 {
 	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
 					   state->hw.rotation,
@@ -653,6 +653,12 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
 struct intel_encoder *
 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
 			   const struct intel_crtc_state *crtc_state);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+				  int color_plane);
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+				      const struct intel_plane_state *state,
+				      int color_plane,
+				      u32 old_offset, u32 new_offset);
 
 /* modesetting */
 void intel_modeset_init_hw(struct drm_i915_private *i915);
@@ -2218,7 +2218,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
 		    has_hdmi_sink))
 		return MODE_CLOCK_HIGH;
 
-	/* BXT DPLL can't generate 223-240 MHz */
+	/* GLK DPLL can't generate 446-480 MHz */
+	if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+		return MODE_CLOCK_RANGE;
+
+	/* BXT/GLK DPLL can't generate 223-240 MHz */
 	if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
 		return MODE_CLOCK_RANGE;
 
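The GLK fix adds a second dead zone to hdmi_port_clock_valid(): GLK's DPLL cannot generate 446-480 MHz, on top of the 223-240 MHz range that BXT and GLK already reject. A small sketch of the two rejected ranges, with a hypothetical checker standing in for the driver function (clocks in kHz, as in the diff):

#include <stdbool.h>

/* Hypothetical stand-in for the DPLL range checks above; clock is in kHz,
 * is_gen9_lp covers both BXT and GLK. */
static bool hdmi_clock_in_dpll_dead_zone(int clock, bool is_glk, bool is_gen9_lp)
{
	/* GLK DPLL can't generate 446-480 MHz */
	if (is_glk && clock > 446666 && clock < 480000)
		return true;

	/* BXT/GLK DPLL can't generate 223-240 MHz */
	if (is_gen9_lp && clock > 223333 && clock < 240000)
		return true;

	return false;
}

For example, a 594000 kHz mode clock passes both checks, while a mode needing roughly 450000 kHz on GLK would now be refused with MODE_CLOCK_RANGE.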
@@ -85,6 +85,47 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 	wbinvd_on_all_cpus();
 }
 
+int i915_gem_freeze(struct drm_i915_private *i915)
+{
+	/* Discard all purgeable objects, let userspace recover those as
+	 * required after resuming.
+	 */
+	i915_gem_shrink_all(i915);
+
+	return 0;
+}
+
+int i915_gem_freeze_late(struct drm_i915_private *i915)
+{
+	struct drm_i915_gem_object *obj;
+	intel_wakeref_t wakeref;
+
+	/*
+	 * Called just before we write the hibernation image.
+	 *
+	 * We need to update the domain tracking to reflect that the CPU
+	 * will be accessing all the pages to create and restore from the
+	 * hibernation, and so upon restoration those pages will be in the
+	 * CPU domain.
+	 *
+	 * To make sure the hibernation image contains the latest state,
+	 * we update that state just before writing out the image.
+	 *
+	 * To try and reduce the hibernation image, we manually shrink
+	 * the objects as well, see i915_gem_freeze()
+	 */
+
+	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+		i915_gem_shrink(i915, -1UL, NULL, ~0);
+	i915_gem_drain_freed_objects(i915);
+
+	wbinvd_on_all_cpus();
+	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
+		__start_cpu_write(obj);
+
+	return 0;
+}
+
 void i915_gem_resume(struct drm_i915_private *i915)
 {
 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -19,4 +19,7 @@ void i915_gem_idle_work_handler(struct work_struct *work);
 void i915_gem_suspend(struct drm_i915_private *i915);
 void i915_gem_suspend_late(struct drm_i915_private *i915);
 
+int i915_gem_freeze(struct drm_i915_private *i915);
+int i915_gem_freeze_late(struct drm_i915_private *i915);
+
 #endif /* __I915_GEM_PM_H__ */
@@ -753,22 +753,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
 	mutex_lock(&i915->mm.stolen_lock);
 	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
 	mutex_unlock(&i915->mm.stolen_lock);
-	if (ret) {
-		obj = ERR_PTR(ret);
+	if (ret)
 		goto err_free;
-	}
 
 	obj = i915_gem_object_alloc();
 	if (!obj) {
-		obj = ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
 		goto err_stolen;
 	}
 
 	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
-	if (ret) {
-		obj = ERR_PTR(ret);
+	if (ret)
 		goto err_object_free;
-	}
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 	return obj;
@@ -779,7 +775,7 @@ err_stolen:
 	i915_gem_stolen_remove_node(i915, stolen);
 err_free:
 	kfree(stolen);
-	return obj;
+	return ERR_PTR(ret);
 }
 
 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
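The rework above moves the stolen-object constructor to the usual kernel pattern for pointer-returning constructors: carry the failure in an int ret, jump to unwind labels, and convert to an error pointer exactly once at the end instead of overwriting obj in every branch. A generic, userspace-runnable sketch of that shape, with hypothetical types and local stand-ins for the ERR_PTR() helpers (not the i915 functions):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-4095)
#define PTR_ERR(p)	((long)(p))

struct thing { int id; };

/* Hypothetical constructor: 'ret' carries the error code through the unwind
 * path and is turned into an error pointer only at the single exit label. */
static struct thing *thing_create(int fail_reserve)
{
	struct thing *obj;
	int ret;

	ret = fail_reserve ? -ENOSPC : 0;	/* e.g. reserving an address-space node */
	if (ret)
		goto err;

	obj = malloc(sizeof(*obj));
	if (!obj) {
		ret = -ENOMEM;
		goto err;
	}

	obj->id = 42;
	return obj;

err:
	return ERR_PTR(ret);
}

int main(void)
{
	struct thing *t = thing_create(1);

	if (IS_ERR(t))
		printf("failed: %ld\n", PTR_ERR(t));
	else
		free(t);
	return 0;
}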
@@ -240,7 +240,7 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
 	/* general */
 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
 	/* surface */
-	*cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
 	/* dynamic */
 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
 	/* indirect */
@@ -393,6 +393,7 @@ static void emit_batch(struct i915_vma * const vma,
 		     desc_count);
 
 	/* Reset inherited context registers */
+	gen7_emit_pipeline_flush(&cmds);
 	gen7_emit_pipeline_invalidate(&cmds);
 	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
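The renderclear fix replaces an OR with an addition when forming the surface state base address. OR only behaves like addition when the two operands share no set bits, i.e. when the batch address is aligned beyond the size of the offset being folded in; otherwise the carries are lost and the programmed base is silently wrong. A two-line demonstration with hypothetical example values:

#include <stdio.h>

int main(void)
{
	unsigned int batch_addr = 0x00011000;	/* hypothetical batch address, 4 KiB aligned */
	unsigned int surf_base  = 0x00001800;	/* offset of the surface state within the batch */

	/* OR merges the overlapping bit instead of carrying: 0x00011800, not the real address */
	printf("or : 0x%08x\n", batch_addr | surf_base);
	/* addition yields the intended address 0x00012800 */
	printf("add: 0x%08x\n", batch_addr + surf_base);
	return 0;
}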
@@ -1834,6 +1834,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_write_or(wal,
 			    GEN8_L3SQCREG4,
 			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+		/* Disable atomics in L3 to prevent unrecoverable hangs */
+		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
+				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+		wa_write_clr_set(wal, GEN8_L3SQCREG4,
+				 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+		wa_write_clr_set(wal, GEN9_SCRATCH1,
+				 EVICTION_PERF_FIX_ENABLE, 0);
 	}
 
 	if (IS_HASWELL(i915)) {
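wa_write_clr_set(wal, reg, clr, set) records a read-modify-write workaround: when the list is applied, the register roughly becomes (old & ~clr) | set, so passing the atomics-enable bit as clr with set == 0 clears that bit while leaving the rest of the register alone. A sketch of the effective update (the bit position is the one the register-definition hunk later in the diff assigns via REG_BIT(0)):

#include <stdio.h>

/* Effective register update performed when a clr/set workaround is applied. */
static unsigned int apply_clr_set(unsigned int old, unsigned int clr, unsigned int set)
{
	return (old & ~clr) | set;
}

int main(void)
{
	unsigned int lncf1 = 0xffffffff;	/* pretend every bit was set */
	unsigned int atomics_enable = 1u << 0;	/* GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE */

	/* clr = the atomics bit, set = 0: only that bit ends up cleared */
	printf("0x%08x\n", apply_clr_set(lncf1, atomics_enable, 0));
	return 0;
}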
@@ -3103,7 +3103,7 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct i915_request *requests[I915_NUM_ENGINES] = {};
 	bool is_ctx_pinned[I915_NUM_ENGINES] = {};
-	int ret;
+	int ret = 0;
 
 	if (gvt->is_reg_whitelist_updated)
 		return;
@@ -3157,6 +3157,7 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 		if (IS_ERR(vaddr)) {
 			gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
 				id, PTR_ERR(vaddr));
+			ret = PTR_ERR(vaddr);
 			goto out;
 		}
 
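Both gvt changes close the same class of bug: ret must be initialised on the success path and must actually capture the error before jumping to the common exit, otherwise the code after the label tests garbage or a stale zero. A compact illustration of the pattern, with a hypothetical loop body rather than the gvt code:

#include <errno.h>
#include <stdio.h>

/* Returns 0 if every step succeeds, or the first error encountered. */
static int process_all(int nsteps, int failing_step)
{
	int ret = 0;	/* initialised: meaningful even if the loop never runs */
	int i;

	for (i = 0; i < nsteps; i++) {
		if (i == failing_step) {
			ret = -EIO;	/* capture the error before bailing out */
			goto out;
		}
	}
out:
	/* common cleanup would go here; ret is valid on every path */
	return ret;
}

int main(void)
{
	printf("%d %d\n", process_all(3, -1), process_all(3, 1));
	return 0;
}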
@@ -65,6 +65,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_rc6.h"
@@ -1800,8 +1800,6 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-int i915_gem_freeze(struct drm_i915_private *dev_priv);
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
 
@@ -1145,47 +1145,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
 }
 
-int i915_gem_freeze(struct drm_i915_private *dev_priv)
-{
-	/* Discard all purgeable objects, let userspace recover those as
-	 * required after resuming.
-	 */
-	i915_gem_shrink_all(dev_priv);
-
-	return 0;
-}
-
-int i915_gem_freeze_late(struct drm_i915_private *i915)
-{
-	struct drm_i915_gem_object *obj;
-	intel_wakeref_t wakeref;
-
-	/*
-	 * Called just before we write the hibernation image.
-	 *
-	 * We need to update the domain tracking to reflect that the CPU
-	 * will be accessing all the pages to create and restore from the
-	 * hibernation, and so upon restoration those pages will be in the
-	 * CPU domain.
-	 *
-	 * To make sure the hibernation image contains the latest state,
-	 * we update that state just before writing out the image.
-	 *
-	 * To try and reduce the hibernation image, we manually shrink
-	 * the objects as well, see i915_gem_freeze()
-	 */
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		i915_gem_shrink(i915, -1UL, NULL, ~0);
-	i915_gem_drain_freed_objects(i915);
-
-	wbinvd_on_all_cpus();
-	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
-		__start_cpu_write(obj);
-
-	return 0;
-}
-
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv;
@@ -8225,6 +8225,7 @@ enum {
 #define   GEN11_LQSC_CLEAN_EVICT_DISABLE	(1 << 6)
 #define   GEN8_LQSC_RO_PERF_DIS			(1 << 27)
 #define   GEN8_LQSC_FLUSH_COHERENT_LINES	(1 << 21)
+#define   GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				_MMIO(0x7300)
@@ -12107,6 +12108,12 @@ enum skl_power_gate {
 #define __GEN11_VCS2_MOCS0		0x10000
 #define GEN11_MFX2_MOCS(i)	_MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
 
+#define GEN9_SCRATCH_LNCF1		_MMIO(0xb008)
+#define   GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
+
+#define GEN9_SCRATCH1			_MMIO(0xb11c)
+#define   EVICTION_PERF_FIX_ENABLE	REG_BIT(8)
+
 #define GEN10_SCRATCH_LNCF2		_MMIO(0xb0a0)
 #define   PMFLUSHDONE_LNICRSDROP	(1 << 20)
 #define   PMFLUSH_GAPL3UNBLOCK		(1 << 21)
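REG_BIT(n) is the driver's helper for a single-bit register mask, so the three new definitions used by the L3-atomics workaround correspond to bits 22, 0 and 8. A trivial sketch that prints the resulting mask values (userspace stand-in for the macro, without the driver's compile-time checks):

#include <stdio.h>

/* Userspace stand-in for REG_BIT(); no compile-time range checking here. */
#define REG_BIT(n)	(1u << (n))

int main(void)
{
	printf("GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE = 0x%08x\n", REG_BIT(22));
	printf("GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE = 0x%08x\n", REG_BIT(0));
	printf("EVICTION_PERF_FIX_ENABLE                = 0x%08x\n", REG_BIT(8));
	return 0;
}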
@@ -8,6 +8,7 @@
 
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
+#include "gem/i915_gem_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 