drm/amd/display: Fix system hang after multiple hotplugs (v3)
[Why]
mutex_lock() was introduced in dm_disable_vblank(), which can be called
from IRQ context. Waiting on a mutex in IRQ context can cause problems
such as a kernel lockup.

[How]
Move the code that requires the mutex onto a separate worker thread.

v2: squash in compilation fix without CONFIG_DRM_AMD_DC_DCN (Alex)
v3: squash in warning fix (Wei)

Signed-off-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
Acked-by: Bindu Ramamurthy <bindu.r@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
commit ea3b4242bc
parent b092b19602
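The core of the fix is a common kernel pattern: a caller that may run in IRQ context must not sleep, so instead of taking the mutex directly it records its parameters under an IRQ-safe spinlock and schedules a work item; the worker, running in process context, is the only one that takes the mutex. Below is a minimal, self-contained sketch of that pattern; the demo_* names are illustrative placeholders, not symbols from this driver.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_defer {
	struct work_struct work;	/* deferred, runs in process context */
	spinlock_t lock;		/* IRQ-safe; protects @enable */
	struct mutex big_lock;		/* may sleep; taken only by the worker */
	bool enable;
	int active_count;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_defer *d = container_of(work, struct demo_defer, work);
	unsigned long flags;
	bool enable;

	/* Snapshot the request posted by the IRQ-context caller. */
	spin_lock_irqsave(&d->lock, flags);
	enable = d->enable;
	spin_unlock_irqrestore(&d->lock, flags);

	/* Safe to sleep here: we are on a worker thread, not in an IRQ. */
	mutex_lock(&d->big_lock);
	d->active_count += enable ? 1 : -1;
	mutex_unlock(&d->big_lock);
}

static void demo_defer_init(struct demo_defer *d)
{
	spin_lock_init(&d->lock);
	mutex_init(&d->big_lock);
	INIT_WORK(&d->work, demo_worker);
	d->active_count = 0;
}

/* May be called from IRQ context: only non-sleeping primitives are used. */
static void demo_set_enable(struct demo_defer *d, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	d->enable = enable;
	spin_unlock_irqrestore(&d->lock, flags);

	schedule_work(&d->work);	/* IRQ-safe handoff to process context */
}

This is the shape the patch gives dm_set_vblank(): the spinlock-protected vblank_workqueue fields replace the direct mutex_lock(&dm->dc_lock), and event_mall_stutter() takes dc_lock from the worker instead.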
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -937,7 +937,49 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void event_mall_stutter(struct work_struct *work)
+{
+	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+	struct amdgpu_display_manager *dm = vblank_work->dm;
+
+	mutex_lock(&dm->dc_lock);
+
+	if (vblank_work->enable)
+		dm->active_vblank_irq_count++;
+	else
+		dm->active_vblank_irq_count--;
+
+	dc_allow_idle_optimizations(
+		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
+
+	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+
+	mutex_unlock(&dm->dc_lock);
+}
+
+static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
+{
+	int max_caps = dc->caps.max_links;
+	struct vblank_workqueue *vblank_work;
+	int i = 0;
+
+	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(vblank_work)) {
+		kfree(vblank_work);
+		return NULL;
+	}
+
+	for (i = 0; i < max_caps; i++)
+		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
+
+	return vblank_work;
+}
+#endif
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -957,6 +999,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	spin_lock_init(&adev->dm.vblank_lock);
+#endif
 
 	if(amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1071,6 +1116,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	amdgpu_dm_init_color_mod();
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (adev->dm.dc->caps.max_links > 0) {
+		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
+
+		if (!adev->dm.vblank_workqueue)
+			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+		else
+			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
+	}
+#endif
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -5375,7 +5431,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct amdgpu_display_manager *dm = &adev->dm;
+	unsigned long flags;
+#endif
 	int rc = 0;
 
 	if (enable) {
@@ -5398,22 +5457,15 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	if (amdgpu_in_reset(adev))
 		return 0;
 
-	mutex_lock(&dm->dc_lock);
-
-	if (enable)
-		dm->active_vblank_irq_count++;
-	else
-		dm->active_vblank_irq_count--;
-
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-	dc_allow_idle_optimizations(
-		adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
-
-	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+	spin_lock_irqsave(&dm->vblank_lock, flags);
+	dm->vblank_workqueue->dm = dm;
+	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
+	dm->vblank_workqueue->enable = enable;
+	spin_unlock_irqrestore(&dm->vblank_lock, flags);
+	schedule_work(&dm->vblank_workqueue->mall_work);
 #endif
 
-	mutex_unlock(&dm->dc_lock);
-
 	return 0;
 }
 
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

@@ -92,6 +92,20 @@ struct dm_compressor_info {
 	uint64_t gpu_addr;
 };
 
+/**
+ * struct vblank_workqueue - Work to be executed in a separate thread during vblank
+ * @mall_work: work for MALL stutter
+ * @dm: amdgpu display manager device
+ * @otg_inst: OTG instance of which vblank is being set
+ * @enable: true if enabling vblank
+ */
+struct vblank_workqueue {
+	struct work_struct mall_work;
+	struct amdgpu_display_manager *dm;
+	int otg_inst;
+	bool enable;
+};
+
 /**
  * struct amdgpu_dm_backlight_caps - Information about backlight
  *
@@ -243,6 +257,15 @@ struct amdgpu_display_manager {
 	 */
 	struct mutex audio_lock;
 
+	/**
+	 * @vblank_lock:
+	 *
+	 * Guards access to deferred vblank work state.
+	 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	spinlock_t vblank_lock;
+#endif
+
 	/**
 	 * @audio_component:
 	 *
@@ -321,6 +344,10 @@ struct amdgpu_display_manager {
 	struct hdcp_workqueue *hdcp_workqueue;
 #endif
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct vblank_workqueue *vblank_workqueue;
+#endif
+
 	struct drm_atomic_state *cached_state;
 	struct dc_state *cached_dc_state;
 