commit 9c712c9c38
Merge tag 'amd-drm-fixes-5.12-2021-02-24' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-5.12-2021-02-24:

amdgpu:
- Clang warning fix
- S0ix platform shutdown/poweroff fix
- Misc display fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210225043853.3880-1-alexander.deucher@amd.com
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1008,6 +1008,12 @@ struct amdgpu_device {
 	bool				in_suspend;
 	bool				in_hibernate;
 
+	/*
+	 * The combination flag in_poweroff_reboot_com used to identify the poweroff
+	 * and reboot opt in the s0i3 system-wide suspend.
+	 */
+	bool				in_poweroff_reboot_com;
+
 	atomic_t			in_gpu_reset;
 	enum pp_mp1_state		mp1_state;
 	struct rw_semaphore reset_sem;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2678,7 +2678,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+	if (adev->in_poweroff_reboot_com ||
+	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
 		amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 		amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 	}
@@ -3741,7 +3742,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	amdgpu_fence_driver_suspend(adev);
 
-	if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+	if (adev->in_poweroff_reboot_com ||
+	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
 		r = amdgpu_device_ip_suspend_phase2(adev);
 	else
 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1270,7 +1270,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 	 */
 	if (!amdgpu_passthrough(adev))
 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
+	adev->in_poweroff_reboot_com = true;
 	amdgpu_device_ip_suspend(adev);
+	adev->in_poweroff_reboot_com = false;
 	adev->mp1_state = PP_MP1_STATE_NONE;
 }
 
@@ -1312,8 +1314,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;
 
-	return amdgpu_device_suspend(drm_dev, true);
+	adev->in_poweroff_reboot_com = true;
+	r = amdgpu_device_suspend(drm_dev, true);
+	adev->in_poweroff_reboot_com = false;
+	return r;
 }
 
 static int amdgpu_pmops_restore(struct device *dev)
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -937,7 +937,49 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 
 }
 #endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void event_mall_stutter(struct work_struct *work)
+{
+
+	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+	struct amdgpu_display_manager *dm = vblank_work->dm;
+
+	mutex_lock(&dm->dc_lock);
+
+	if (vblank_work->enable)
+		dm->active_vblank_irq_count++;
+	else
+		dm->active_vblank_irq_count--;
+
+
+	dc_allow_idle_optimizations(
+		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
+
+	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+
+	mutex_unlock(&dm->dc_lock);
+}
+
+static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
+{
+
+	int max_caps = dc->caps.max_links;
+	struct vblank_workqueue *vblank_work;
+	int i = 0;
+
+	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(vblank_work)) {
+		kfree(vblank_work);
+		return NULL;
+	}
+
+	for (i = 0; i < max_caps; i++)
+		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
+
+	return vblank_work;
+}
+#endif
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
 
@@ -957,6 +999,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	spin_lock_init(&adev->dm.vblank_lock);
+#endif
 
 	if(amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1071,6 +1116,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	amdgpu_dm_init_color_mod();
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (adev->dm.dc->caps.max_links > 0) {
+		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
+
+		if (!adev->dm.vblank_workqueue)
+			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+		else
+			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
+	}
+#endif
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1936,7 +1992,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 		dc_commit_updates_for_stream(
 			dm->dc, bundle->surface_updates,
 			dc_state->stream_status->plane_count,
-			dc_state->streams[k], &bundle->stream_update);
+			dc_state->streams[k], &bundle->stream_update, dc_state);
 	}
 
 cleanup:
@@ -1967,7 +2023,8 @@ static void dm_set_dpms_off(struct dc_link *link)
 
 	stream_update.stream = stream_state;
 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
-				     stream_state, &stream_update);
+				     stream_state, &stream_update,
+				     stream_state->ctx->dc->current_state);
 	mutex_unlock(&adev->dm.dc_lock);
 }
 
@@ -5374,7 +5431,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct amdgpu_display_manager *dm = &adev->dm;
+	unsigned long flags;
+#endif
 	int rc = 0;
 
 	if (enable) {
@@ -5397,22 +5457,15 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 
 	if (amdgpu_in_reset(adev))
 		return 0;
 
-	mutex_lock(&dm->dc_lock);
-
-	if (enable)
-		dm->active_vblank_irq_count++;
-	else
-		dm->active_vblank_irq_count--;
-
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-	dc_allow_idle_optimizations(
-		adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
-
-	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+	spin_lock_irqsave(&dm->vblank_lock, flags);
+	dm->vblank_workqueue->dm = dm;
+	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
+	dm->vblank_workqueue->enable = enable;
+	spin_unlock_irqrestore(&dm->vblank_lock, flags);
+	schedule_work(&dm->vblank_workqueue->mall_work);
 #endif
 
-	mutex_unlock(&dm->dc_lock);
-
 	return 0;
 }
@@ -7663,7 +7716,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_crtc *pcrtc,
 				    bool wait_for_vblank)
 {
-	int i;
+	uint32_t i;
 	uint64_t timestamp_ns;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -7704,7 +7757,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		amdgpu_dm_commit_cursors(state);
 
 	/* update planes when needed */
-	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
 		struct drm_crtc *crtc = new_plane_state->crtc;
 		struct drm_crtc_state *new_crtc_state;
 		struct drm_framebuffer *fb = new_plane_state->fb;
@@ -7927,7 +7980,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 						     bundle->surface_updates,
 						     planes_count,
 						     acrtc_state->stream,
-						     &bundle->stream_update);
+						     &bundle->stream_update,
+						     dc_state);
 
 		/**
 		 * Enable or disable the interrupts on the backend.
@@ -8263,13 +8317,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
-		struct dc_surface_update surface_updates[MAX_SURFACES];
+		struct dc_surface_update dummy_updates[MAX_SURFACES];
 		struct dc_stream_update stream_update;
 		struct dc_info_packet hdr_packet;
 		struct dc_stream_status *status = NULL;
 		bool abm_changed, hdr_changed, scaling_changed;
 
-		memset(&surface_updates, 0, sizeof(surface_updates));
+		memset(&dummy_updates, 0, sizeof(dummy_updates));
 		memset(&stream_update, 0, sizeof(stream_update));
 
 		if (acrtc) {
@@ -8326,15 +8380,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		 * To fix this, DC should permit updating only stream properties.
 		 */
 		for (j = 0; j < status->plane_count; j++)
-			surface_updates[j].surface = status->plane_states[j];
+			dummy_updates[j].surface = status->plane_states[0];
 
 
 		mutex_lock(&dm->dc_lock);
 		dc_commit_updates_for_stream(dm->dc,
-						     surface_updates,
+						     dummy_updates,
 						     status->plane_count,
 						     dm_new_crtc_state->stream,
-						     &stream_update);
+						     &stream_update,
+						     dc_state);
 		mutex_unlock(&dm->dc_lock);
 	}
 
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -92,6 +92,20 @@ struct dm_compressor_info {
 	uint64_t gpu_addr;
 };
 
+/**
+ * struct vblank_workqueue - Works to be executed in a separate thread during vblank
+ * @mall_work: work for mall stutter
+ * @dm: amdgpu display manager device
+ * @otg_inst: otg instance of which vblank is being set
+ * @enable: true if enable vblank
+ */
+struct vblank_workqueue {
+	struct work_struct mall_work;
+	struct amdgpu_display_manager *dm;
+	int otg_inst;
+	bool enable;
+};
+
 /**
  * struct amdgpu_dm_backlight_caps - Information about backlight
  *
@@ -243,6 +257,15 @@ struct amdgpu_display_manager {
 	 */
 	struct mutex audio_lock;
 
+	/**
+	 * @vblank_lock:
+	 *
+	 * Guards access to deferred vblank work state.
+	 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	spinlock_t vblank_lock;
+#endif
+
 	/**
 	 * @audio_component:
 	 *
@@ -321,6 +344,10 @@ struct amdgpu_display_manager {
 	struct hdcp_workqueue *hdcp_workqueue;
 #endif
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct vblank_workqueue *vblank_workqueue;
+#endif
+
 	struct drm_atomic_state *cached_state;
 	struct dc_state *cached_dc_state;
 
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2697,7 +2697,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		struct dc_surface_update *srf_updates,
 		int surface_count,
 		struct dc_stream_state *stream,
-		struct dc_stream_update *stream_update)
+		struct dc_stream_update *stream_update,
+		struct dc_state *state)
 {
 	const struct dc_stream_status *stream_status;
 	enum surface_update_type update_type;
@@ -2716,12 +2717,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
 
 	if (update_type >= UPDATE_TYPE_FULL) {
-		struct dc_plane_state *new_planes[MAX_SURFACES];
-
-		memset(new_planes, 0, sizeof(new_planes));
-
-		for (i = 0; i < surface_count; i++)
-			new_planes[i] = srf_updates[i].surface;
 
 		/* initialize scratch memory for building context */
 		context = dc_create_state(dc);
@@ -2730,21 +2725,15 @@ void dc_commit_updates_for_stream(struct dc *dc,
 			return;
 		}
 
-		dc_resource_state_copy_construct(
-				dc->current_state, context);
+		dc_resource_state_copy_construct(state, context);
 
-		/*remove old surfaces from context */
-		if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
-			DC_ERROR("Failed to remove streams for new validate context!\n");
-			return;
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+				new_pipe->plane_state->force_full_update = true;
 		}
-
-		/* add surface to context */
-		if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
-			DC_ERROR("Failed to add streams for new validate context!\n");
-			return;
-		}
-
 	}
 
 
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -294,7 +294,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		struct dc_surface_update *srf_updates,
 		int surface_count,
 		struct dc_stream_state *stream,
-		struct dc_stream_update *stream_update);
+		struct dc_stream_update *stream_update,
+		struct dc_state *state);
 /*
  * Log the current stream state.
  */
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
 		break;
 	default:
 		// invalid source select DIG
-		ASSERT(false);
 		result = ENGINE_ID_UNKNOWN;
 	}
 
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc)
 
 			fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
 					dc->links[i]->link_enc);
+			if (fe == ENGINE_ID_UNKNOWN)
+				continue;
 
 			for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
 				if (fe == dc->res_pool->stream_enc[j]->id) {
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
 	.ack = NULL
 };
 
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+	.set = NULL,
+	.ack = NULL
+};
+
 #undef BASE_INNER
 #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
 
@@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
 		.funcs = &vblank_irq_info_funcs\
 	}
 
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
+ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+	[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+		IRQ_REG_ENTRY(OTG, reg_num,\
+			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+		.funcs = &vupdate_no_lock_irq_info_funcs\
+	}
+
 #define vblank_int_entry(reg_num)\
 	[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
 		IRQ_REG_ENTRY(OTG, reg_num,\
@@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
 	vupdate_int_entry(3),
 	vupdate_int_entry(4),
 	vupdate_int_entry(5),
+	vupdate_no_lock_int_entry(0),
+	vupdate_no_lock_int_entry(1),
+	vupdate_no_lock_int_entry(2),
+	vupdate_no_lock_int_entry(3),
+	vupdate_no_lock_int_entry(4),
+	vupdate_no_lock_int_entry(5),
 	vblank_int_entry(0),
 	vblank_int_entry(1),
 	vblank_int_entry(2),
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -762,7 +762,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 		structure_size = sizeof(struct gpu_metrics_v2_0);
 		break;
 	default:
-		break;
+		return;
 	}
 
 #undef METRICS_VERSION