From 02124a03e60d8526ad8da19d276e8b2b77be28fb Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 19 Sep 2016 09:30:21 -0400
Subject: [PATCH 01/49] drm/amdgpu/dce6: fix off by one in interrupt setup

Reviewed-by: Tom St Denis
Reported-by: Christian Inci
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index d3512f381e53..ec5d8d93ad23 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -185,7 +185,7 @@ static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 	unsigned i;
 
 	/* Enable pflip interrupts */
-	for (i = 0; i <= adev->mode_info.num_crtc; i++)
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 }
 
@@ -194,7 +194,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 	unsigned i;
 
 	/* Disable pflip interrupts */
-	for (i = 0; i <= adev->mode_info.num_crtc; i++)
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 }

From e313de7e89978012afec5953068052cbb42134a3 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 19 Sep 2016 12:17:22 -0400
Subject: [PATCH 02/49] Revert "drm/amdgpu: skip suspend/resume on DRM_SWITCH_POWER_DYNAMIC_OFF"

This is not necessary as pointed out by Lukas Wunner.

This reverts commit f46cf3735f4c05eb752d020d34ace1c85ccf567c.
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ddae5ff41bb..377d81875c6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1842,8 +1842,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	adev = dev->dev_private;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	drm_kms_helper_poll_disable(dev);
@@ -1928,8 +1927,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	struct drm_crtc *crtc;
 	int r;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	if (fbcon)

From f2aba352a954d962c434c059cb080eb935537e45 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 19 Sep 2016 12:20:18 -0400
Subject: [PATCH 03/49] Revert "drm/radeon: skip suspend/resume on DRM_SWITCH_POWER_DYNAMIC_OFF"

This is not necessary as pointed out by Lukas Wunner.

This reverts commit 5e0b1617fc38a27cf494c5d0b51f08de77ce0d20.
---
 drivers/gpu/drm/radeon/radeon_device.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b423c0159581..bbc895891631 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1594,8 +1594,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 
 	rdev = dev->dev_private;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	drm_kms_helper_poll_disable(dev);
@@ -1690,8 +1689,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	struct drm_crtc *crtc;
 	int r;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	if (fbcon) {

From 9716ebc38dfabe6c8e5e3c809e9f3c61dd3740f9 Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Wed, 14 Sep 2016 13:20:34 +0800
Subject: [PATCH 04/49] drm/amd/powerplay: fix mclk not switching back after multi-head was disabled

Signed-off-by: Rex Zhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 ++
 drivers/gpu/drm/amd/powerplay/eventmgr/psm.c               | 3 ++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b48184..92b117843875 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
 	unblock_adjust_power_state_tasks,
 	set_cpu_power_state,
 	notify_hw_power_source_tasks,
+	get_2d_performance_state_tasks,
+	set_performance_state_tasks,
 	/* updateDALConfigurationTasks,
 	variBrightDisplayConfigurationChangeTasks, */
 	adjust_power_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index 1d1875a7cb2d..489908887e9c 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -101,11 +101,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
 	if (requested == NULL)
 		return 0;
 
+	phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
 	if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
 		equal = false;
 
 	if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
-		phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
 		phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
 		memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
 	}

From c07aefc268acf53c551dbd50d4fc08a82bee899f Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Wed, 14 Sep 2016 19:24:25 +0800
Subject: [PATCH 05/49] drm/amd/powerplay: fix potential issue of array access violation

Signed-off-by: Rex Zhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 524d0dd4f0e9..4f82a06ba3e2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -388,12 +388,9 @@ int phm_reset_single_dpm_table(void *table,
 	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
 
-	PP_ASSERT_WITH_CODE(count <= max,
-			"Fatal error, can not set up single DPM table entries to exceed max number!",
-			);
+	dpm_table->count = count > max ? max : count;
 
-	dpm_table->count = count;
-	for (i = 0; i < max; i++)
+	for (i = 0; i < dpm_table->count; i++)
 		dpm_table->dpm_level[i].enabled = false;
 
 	return 0;

From 6a99a964f6e6af6b6f0d536312722257ae44f812 Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Sat, 17 Sep 2016 18:50:23 +0800
Subject: [PATCH 06/49] drm/amd/powerplay: add parameter of the mclk switch latency time

Signed-off-by: Rex Zhu
Reviewed-by: Ken Wang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 18f39e89a7aa..f941acf563a9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -260,6 +260,7 @@ enum amd_pp_clock_type {
 struct amd_pp_clocks {
 	uint32_t count;
 	uint32_t clock[MAX_NUM_CLOCKS];
+	uint32_t latency[MAX_NUM_CLOCKS];
 };
 

From ee1a51f882f6197e05948de615842761c3386524 Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Mon, 8 Aug 2016 13:44:59 +0800
Subject: [PATCH 07/49] drm/amd/powerplay: add common interface in smumgr to help to visit fw image.

Signed-off-by: Rex Zhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/inc/smumgr.h    |  69 ++++++++++++
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 101 +++++++++++++++++-
 2 files changed, 169 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 34abfd2cde53..e7af6436c6c2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -28,6 +28,7 @@
 struct pp_smumgr;
 struct pp_instance;
+struct pp_hwmgr;
 
 #define smu_lower_32_bits(n) ((uint32_t)(n))
 #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@@ -53,6 +54,44 @@ enum AVFS_BTC_STATUS {
 	AVFS_BTC_SMUMSG_ERROR
 };
 
+enum SMU_TABLE {
+	SMU_UVD_TABLE = 0,
+	SMU_VCE_TABLE,
+	SMU_SAMU_TABLE,
+	SMU_BIF_TABLE,
+};
+
+enum SMU_TYPE {
+	SMU_SoftRegisters = 0,
+	SMU_Discrete_DpmTable,
+};
+
+enum SMU_MEMBER {
+	HandshakeDisables = 0,
+	VoltageChangeTimeout,
+	AverageGraphicsActivity,
+	PreVBlankGap,
+	VBlankTimeout,
+	UvdBootLevel,
+	VceBootLevel,
+	SamuBootLevel,
+	LowSclkInterruptThreshold,
+};
+
+
+enum SMU_MAC_DEFINITION {
+	SMU_MAX_LEVELS_GRAPHICS = 0,
+	SMU_MAX_LEVELS_MEMORY,
+	SMU_MAX_LEVELS_LINK,
+	SMU_MAX_ENTRIES_SMIO,
+	SMU_MAX_LEVELS_VDDC,
+	SMU_MAX_LEVELS_VDDGFX,
+	SMU_MAX_LEVELS_VDDCI,
+	SMU_MAX_LEVELS_MVDD,
+	SMU_UVD_MCLK_HANDSHAKE_DISABLE,
+};
+
+
 struct pp_smumgr_func {
 	int (*smu_init)(struct pp_smumgr *smumgr);
 	int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -69,6 +108,18 @@ struct pp_smumgr_func {
 	int (*download_pptable_settings)(struct pp_smumgr *smumgr,
 			void **table);
 	int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
+	int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+	int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+	int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+	int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+	int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+	int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+	int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+	int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+	int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+	uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+	uint32_t (*get_mac_definition)(uint32_t value);
+	bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_smumgr {
@@ -127,6 +178,24 @@ extern int tonga_smum_init(struct pp_smumgr *smumgr);
 extern int fiji_smum_init(struct pp_smumgr *smumgr);
 extern int polaris10_smum_init(struct pp_smumgr *smumgr);
 
+extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+
+extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result);
+extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+		uint32_t type, uint32_t member);
+extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+
+extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
+
 #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 
 #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index bbeb786db003..e5812aa456f3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -86,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr)
 	return 0;
 }
 
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
+		return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+
+	return 0;
+}
+
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
+		return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+
+	return 0;
+}
+
+int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+
+	if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
+		return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+
+	return 0;
+}
+
+int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+
+	if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
+		return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+
+	return 0;
+}
+
+uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+{
+	if (NULL != smumgr->smumgr_funcs->get_offsetof)
+		return smumgr->smumgr_funcs->get_offsetof(type, member);
+
+	return 0;
+}
+
+int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
+		return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+	return 0;
+}
+
 int smum_get_argument(struct pp_smumgr *smumgr)
 {
 	if (NULL != smumgr->smumgr_funcs->get_argument)
@@ -94,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr)
 	return 0;
 }
 
+uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+{
+	if (NULL != smumgr->smumgr_funcs->get_mac_definition)
+		return smumgr->smumgr_funcs->get_mac_definition(value);
+
+	return 0;
+}
+
 int smum_download_powerplay_table(struct pp_smumgr *smumgr,
 				void **table)
 {
 	if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
 		return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
 								table);
-
 	return 0;
 }
 
@@ -267,3 +325,44 @@ int smu_free_memory(void *device, void *handle)
 
 	return 0;
 }
+
+int smum_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
+		return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+
+	return 0;
+}
+
+int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
+		return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+
+	return 0;
+}
+
+int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
+		return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+
+	return 0;
+}
+
+/*this interface is needed by island ci/vi */
+int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
+		return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+
+	return 0;
+}
+
+bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
+		return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+
+	return true;
+}

From 599a7e9fe1b683d04f889d68f866f5548b1e0239 Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Fri, 9 Sep 2016 13:25:22 +0800
Subject: [PATCH 08/49] drm/amd/powerplay: implement smu7 hwmgr to manager asics with smu ip version 7.
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Reviewed-by: Ken Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 23 +- .../powerplay/hwmgr/smu7_clockpowergating.c | 488 ++ .../powerplay/hwmgr/smu7_clockpowergating.h | 40 + .../amd/powerplay/hwmgr/smu7_dyn_defaults.h | 55 + .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4350 +++++++++++++++++ .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 353 ++ .../drm/amd/powerplay/hwmgr/smu7_powertune.c | 729 +++ .../drm/amd/powerplay/hwmgr/smu7_powertune.h | 62 + .../drm/amd/powerplay/hwmgr/smu7_thermal.c | 577 +++ .../drm/amd/powerplay/hwmgr/smu7_thermal.h | 58 + .../gpu/drm/amd/powerplay/inc/smu7_common.h | 58 + .../gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 412 ++ 12 files changed, 7195 insertions(+), 10 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu7_common.h create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index 6e359c90dfda..d5d5626b5195 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -3,16 +3,19 @@ # It provides the hardware management services for the driver. HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ - hardwaremanager.o pp_acpi.o cz_hwmgr.o \ - cz_clockpowergating.o tonga_powertune.o\ - process_pptables_v1_0.o ppatomctrl.o \ - tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ - fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ - fiji_clockpowergating.o fiji_thermal.o \ - polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ - polaris10_clockpowergating.o iceland_hwmgr.o \ - iceland_clockpowergating.o iceland_thermal.o \ - iceland_powertune.o + hardwaremanager.o pp_acpi.o cz_hwmgr.o \ + cz_clockpowergating.o tonga_powertune.o\ + process_pptables_v1_0.o ppatomctrl.o \ + tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ + fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ + fiji_clockpowergating.o fiji_thermal.o \ + polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ + polaris10_clockpowergating.o \ + smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ + smu7_clockpowergating.o iceland_hwmgr.o \ + iceland_clockpowergating.o iceland_thermal.o \ + iceland_powertune.o + AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c new file mode 100644 index 000000000000..6eb6db199250 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -0,0 +1,488 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "smu7_hwmgr.h" +#include "smu7_clockpowergating.h" +#include "smu7_common.h" + +static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + PPSMC_MSG_UVDDPM_Enable : + PPSMC_MSG_UVDDPM_Disable); +} + +static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + PPSMC_MSG_VCEDPM_Enable : + PPSMC_MSG_VCEDPM_Disable); +} + +static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + PPSMC_MSG_SAMUDPM_Enable : + PPSMC_MSG_SAMUDPM_Disable); +} + +static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (!bgate) + smum_update_smc_table(hwmgr, SMU_UVD_TABLE); + return smu7_enable_disable_uvd_dpm(hwmgr, !bgate); +} + +static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (!bgate) + smum_update_smc_table(hwmgr, SMU_VCE_TABLE); + return smu7_enable_disable_vce_dpm(hwmgr, !bgate); +} + +static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (!bgate) + smum_update_smc_table(hwmgr, SMU_SAMU_TABLE); + return smu7_enable_disable_samu_dpm(hwmgr, !bgate); +} + +int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_uvd_power_gating(hwmgr)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_UVDPowerOFF); + return 0; +} + +int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_uvd_power_gating(hwmgr)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDynamicPowerGating)) { + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDPowerON, 1); + } else { + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDPowerON, 0); + } + } + + return 0; +} + +int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_vce_power_gating(hwmgr)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_VCEPowerOFF); + return 0; +} + +int smu7_powerup_vce(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_vce_power_gating(hwmgr)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_VCEPowerON); + return 0; +} + +int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SamuPowerGating)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SAMPowerOFF); + return 0; 
+} + +int smu7_powerup_samu(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SamuPowerGating)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SAMPowerON); + return 0; +} + +int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + + smu7_powerup_uvd(hwmgr); + smu7_powerup_vce(hwmgr); + smu7_powerup_samu(hwmgr); + + return 0; +} + +int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = bgate; + + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); + smu7_update_uvd_dpm(hwmgr, true); + smu7_powerdown_uvd(hwmgr); + } else { + smu7_powerup_uvd(hwmgr); + smu7_update_uvd_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + } + + return 0; +} + +int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->vce_power_gated == bgate) + return 0; + + data->vce_power_gated = bgate; + + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + smu7_update_vce_dpm(hwmgr, true); + smu7_powerdown_vce(hwmgr); + } else { + smu7_powerup_vce(hwmgr); + smu7_update_vce_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + } + return 0; +} + +int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->samu_power_gated == bgate) + return 0; + + data->samu_power_gated = bgate; + + if (bgate) { + smu7_update_samu_dpm(hwmgr, true); + smu7_powerdown_samu(hwmgr); + } else { + smu7_powerup_samu(hwmgr); + smu7_update_samu_dpm(hwmgr, false); + } + + return 0; +} + +int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id) +{ + PPSMC_Msg msg; + uint32_t value; + + if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU)) + return 0; + + switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { + case PP_GROUP_GFX: + switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { + case PP_BLOCK_GFX_CG: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_CGCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_CGLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_GFX_3D: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_3DCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? 
+ PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_3DLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_GFX_RLC: + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_RLC_LS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_GFX_CP: + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_CP_LS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_GFX_MG: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK | + CG_GFX_OTHERS_MGCG_MASK); + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + default: + return -EINVAL; + } + break; + + case PP_GROUP_SYS: + switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { + case PP_BLOCK_SYS_BIF: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_BIF_MGCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_BIF_MGLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_SYS_MC: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_MC_MGCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_MC_MGLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_SYS_DRM: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_DRM_MGCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_DRM_MGLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_SYS_HDP: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? 
+ PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_HDP_MGCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_HDP_MGLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_SYS_SDMA: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_SDMA_MGCG_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_SDMA_MGLS_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + case PP_BLOCK_SYS_ROM: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? + PPSMC_MSG_EnableClockGatingFeature : + PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_ROM_MASK; + + if (smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, msg, value)) + return -EINVAL; + } + break; + + default: + return -EINVAL; + + } + break; + + default: + return -EINVAL; + + } + + return 0; +} + +/* This function is for Polaris11 only for now, + * Powerplay will only control the static per CU Power Gating. + * Dynamic per CU Power Gating will be done in gfx. + */ +int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) +{ + struct cgs_system_info sys_info = {0}; + uint32_t active_cus; + int result; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO; + + result = cgs_query_system_info(hwmgr->device, &sys_info); + + if (result) + return -EINVAL; + + active_cus = sys_info.value; + + if (enable) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus); + else + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GFX_CU_PG_DISABLE); +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h new file mode 100644 index 000000000000..d52a28c343e3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -0,0 +1,40 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _SMU7_CLOCK_POWER_GATING_H_ +#define _SMU7_CLOCK__POWER_GATING_H_ + +#include "smu7_hwmgr.h" +#include "pp_asicblocks.h" + +int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); +int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id); +int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h new file mode 100644 index 000000000000..f967613191cf --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h @@ -0,0 +1,55 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _SMU7_DYN_DEFAULTS_H +#define _SMU7_DYN_DEFAULTS_H + + +/* We need to fill in the default values */ + + +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 + + +#define SMU7_THERMALPROTECTCOUNTER_DFLT 0x200 +#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT 0 +#define SMU7_STATICSCREENTHRESHOLD_DFLT 0x00C8 +#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 +#define SMU7_REFERENCEDIVIDER_DFLT 4 + +#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define SMU7_CGULVPARAMETER_DFLT 0x00040035 +#define SMU7_CGULVCONTROL_DFLT 0x00007450 +#define SMU7_TARGETACTIVITY_DFLT 50 +#define SMU7_MCLK_TARGETACTIVITY_DFLT 10 + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c new file mode 100644 index 000000000000..f67e1e260b30 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -0,0 +1,4350 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include +#include +#include "linux/delay.h" +#include "pp_acpi.h" +#include "pp_debug.h" +#include "ppatomctrl.h" +#include "atombios.h" +#include "pptable_v1_0.h" +#include "pppcielanes.h" +#include "amd_pcie_helpers.h" +#include "hardwaremanager.h" +#include "process_pptables_v1_0.h" +#include "cgs_common.h" + +#include "smu7_common.h" + +#include "hwmgr.h" +#include "smu7_hwmgr.h" +#include "smu7_powertune.h" +#include "smu7_dyn_defaults.h" +#include "smu7_thermal.h" +#include "smu7_clockpowergating.h" +#include "processpptables.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define MC_CG_SEQ_DRAMCONF_S0 0x05 +#define MC_CG_SEQ_DRAMCONF_S1 0x06 +#define MC_CG_SEQ_YCLK_SUSPEND 0x04 +#define MC_CG_SEQ_YCLK_RESUME 0x0a + +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +#define MEM_FREQ_LOW_LATENCY 25000 +#define MEM_FREQ_HIGH_LATENCY 80000 + +#define MEM_LATENCY_HIGH 45 +#define MEM_LATENCY_LOW 35 +#define MEM_LATENCY_ERR 0xFFFF + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +#define PCIE_BUS_CLK 10000 +#define TCLK (PCIE_BUS_CLK / 10) + + +/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, + DPM_EVENT_SRC_EXTERNAL = 1, + DPM_EVENT_SRC_DIGITAL = 2, + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 +}; + +static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct smu7_power_state *cast_phw_smu7_power_state( + struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (struct smu7_power_state *)hw_ps; +} + +const struct smu7_power_state *cast_const_phw_smu7_power_state( + const struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (const struct smu7_power_state *)hw_ps; +} + +/** + * Find the MC microcode version and store it in the HwMgr struct + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr) +{ + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); + + hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + return 0; +} + +uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speedCntl = 0; + + /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ + speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speedCntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + +int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +/** +* Enable voltage control +* +* @param pHwMgr the address of the powerplay hardware manager. 
+* @return always PP_Result_OK +*/ +int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); + + return 0; +} + +/** +* Checks if we want to support voltage control +* +* @param hwmgr the address of the powerplay hardware manager. +*/ +static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr) +{ + const struct smu7_hwmgr *data = + (const struct smu7_hwmgr *)(hwmgr->backend); + + return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control); +} + +/** +* Enable voltage control +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table, + struct phm_clock_voltage_dependency_table *voltage_dependency_table + ) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Dependency Table empty.", return -EINVAL;); + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + voltage_table->count = voltage_dependency_table->count; + + for (i = 0; i < voltage_dependency_table->count; i++) { + voltage_table->entries[i].value = + voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + + +/** +* Create Voltage Tables. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + int result = 0; + uint32_t tmp; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, + &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve MVDD table.", + return result); + } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + if (hwmgr->pp_table_version == PP_TABLE_V1) + result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table), + table_info->vdd_dep_on_mclk); + else if (hwmgr->pp_table_version == PP_TABLE_V0) + result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table), + hwmgr->dyn_state.mvdd_dependency_on_mclk); + + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 MVDD table from dependancy table.", + return result;); + } + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, + &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", + return result); + } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + if (hwmgr->pp_table_version == PP_TABLE_V1) + result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table), + table_info->vdd_dep_on_mclk); + else if (hwmgr->pp_table_version == PP_TABLE_V0) + result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), + hwmgr->dyn_state.vddci_dependency_on_mclk); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependancy table.", + return result); + } + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == 
data->vdd_gfx_control) { + /* VDDGFX has only SVI2 voltage control */ + result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table), + table_info->vddgfx_lookup_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;); + } + + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDC table.", return result;); + } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + + if (hwmgr->pp_table_version == PP_TABLE_V0) + result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table, + hwmgr->dyn_state.vddc_dependency_on_mclk); + else if (hwmgr->pp_table_version == PP_TABLE_V1) + result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table), + table_info->vddc_lookup_table); + + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); + } + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC); + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= tmp), + "Too many voltage values for VDDC. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->vddc_voltage_table))); + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + PP_ASSERT_WITH_CODE( + (data->vddgfx_voltage_table.count <= tmp), + "Too many voltage values for VDDC. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->vddgfx_voltage_table))); + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI); + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= tmp), + "Too many voltage values for VDDCI. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->vddci_voltage_table))); + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD); + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= tmp), + "Too many voltage values for MVDD. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->mvdd_voltage_table))); + + return 0; +} + +/** +* Programs static screed detection parameters +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_program_static_screen_threshold_parameters( + struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* Set static screen threshold unit */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, + data->static_screen_threshold_unit); + /* Set static screen threshold */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, + data->static_screen_threshold); + + return 0; +} + +/** +* Setup display gap for glitch free memory clock switching. +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @return always 0 +*/ +static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t display_gap = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL); + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, + DISP_GAP, DISPLAY_GAP_IGNORE); + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, + DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, display_gap); + + return 0; +} + +/** +* Programs activity state transition voting clients +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + +static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + +/* Copy one arb setting to another and then switch the active set. + * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
+ */ +static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, + uint32_t arb_src, uint32_t arb_dest) +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint32_t burst_time; + uint32_t mc_cg_config; + + switch (arb_src) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + break; + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); + break; + default: + return -EINVAL; + } + + switch (arb_dest) { + case MC_CG_ARB_FREQ_F0: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); + break; + case MC_CG_ARB_FREQ_F1: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); + break; + default: + return -EINVAL; + } + + mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); + mc_cg_config |= 0x0000000F; + cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); + + return 0; +} + +static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + +/** +* Initial switch from ARB F0->F1 +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +* This function is to be called from the SetPowerState table. 
+*/ +static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) +{ + return smu7_copy_and_switch_arb_sets(hwmgr, + MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return smu7_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + +static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = NULL; + + uint32_t i, max_entry; + uint32_t tmp; + + PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || + data->use_pcie_power_saving_levels), "No pcie performance levels!", + return -EINVAL); + + if (table_info != NULL) + pcie_table = table_info->pcie_table; + + if (data->use_pcie_performance_levels && + !data->use_pcie_power_saving_levels) { + data->pcie_gen_power_saving = data->pcie_gen_performance; + data->pcie_lane_power_saving = data->pcie_lane_performance; + } else if (!data->use_pcie_performance_levels && + data->use_pcie_power_saving_levels) { + data->pcie_gen_performance = data->pcie_gen_power_saving; + data->pcie_lane_performance = data->pcie_lane_power_saving; + } + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK); + phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, + tmp, + MAX_REGULAR_DPM_NUMBER); + + if (pcie_table != NULL) { + /* max_entry is used to make sure we reserve one PCIE level + * for boot level (fix for A+A PSPP issue). + * If PCIE table from PPTable have ULV entry + 8 entries, + * then ignore the last entry.*/ + max_entry = (tmp < pcie_table->count) ? 
tmp : pcie_table->count; + for (i = 1; i < max_entry; i++) { + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, + get_pcie_gen_support(data->pcie_gen_cap, + pcie_table->entries[i].gen_speed), + get_pcie_lane_support(data->pcie_lane_cap, + pcie_table->entries[i].lane_width)); + } + data->dpm_table.pcie_speed_table.count = max_entry - 1; + smum_update_smc_table(hwmgr, SMU_BIF_TABLE); + } else { + /* Hardcode Pcie Table */ + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + + data->dpm_table.pcie_speed_table.count = 6; + } + /* Populate last level for boot PCIE level, but do not increment count. */ + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + data->dpm_table.pcie_speed_table.count, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + + return 0; +} + +static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); + + phm_reset_single_dpm_table( + &data->dpm_table.sclk_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_GRAPHICS), + MAX_REGULAR_DPM_NUMBER); + phm_reset_single_dpm_table( + &data->dpm_table.mclk_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER); + + phm_reset_single_dpm_table( + &data->dpm_table.vddc_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_VDDC), + MAX_REGULAR_DPM_NUMBER); + phm_reset_single_dpm_table( + &data->dpm_table.vddci_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER); + + phm_reset_single_dpm_table( + &data->dpm_table.mvdd_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_MVDD), + MAX_REGULAR_DPM_NUMBER); + return 0; +} +/* + * This function is to initialize all DPM state tables + * for SMU7 based on the dependency table. + * Dynamic state patching function will then trim these + * state tables to the allowed range based + * on the power policy or external client requests, + * such as UVD request, etc. 
+ */
+
+static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_mclk;
+	struct phm_cac_leakage_table *std_voltage_table =
+		hwmgr->dyn_state.cac_leakage_table;
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+		"SCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+		"MCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);
+
+
+	/* Initialize Sclk DPM table based on allowed Sclk values */
+	data->dpm_table.sclk_table.count = 0;
+
+	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+			allowed_vdd_sclk_table->entries[i].clk) {
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+				allowed_vdd_sclk_table->entries[i].clk;
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+	/* Initialize Mclk DPM table based on allowed Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+			allowed_vdd_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+				allowed_vdd_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	/* Initialize Vddc DPM table based on allowed Vddc values. And populate corresponding std values.
*/ + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; + /* param1 is for corresponding std voltage */ + data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + } + + data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; + allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* Initialize Vddci DPM table based on allow Mclk values */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddci_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; + } + + allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* + * Initialize MVDD DPM table based on allow Mclk + * values + */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; + } + + return 0; +} + +static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; + + if (table_info == NULL) + return -EINVAL; + + dep_sclk_table = table_info->vdd_dep_on_sclk; + dep_mclk_table = table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, + "SCLK dependency table is missing.", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, + "SCLK dependency table count is 0.", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, + "MCLK dependency table is missing.", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, + "MCLK dependency table count is 0", + return -EINVAL); + + /* Initialize Sclk DPM table based on allow Sclk values */ + data->dpm_table.sclk_table.count = 0; + for (i = 0; i < dep_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value != + dep_sclk_table->entries[i].clk) { + + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = + dep_sclk_table->entries[i].clk; + + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = + (i == 0) ? true : false; + data->dpm_table.sclk_table.count++; + } + } + + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i = 0; i < dep_mclk_table->count; i++) { + if (i == 0 || data->dpm_table.mclk_table.dpm_levels + [data->dpm_table.mclk_table.count - 1].value != + dep_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = + dep_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = + (i == 0) ? 
true : false; + data->dpm_table.mclk_table.count++; + } + } + + return 0; +} + +int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + smu7_reset_dpm_tables(hwmgr); + + if (hwmgr->pp_table_version == PP_TABLE_V1) + smu7_setup_dpm_tables_v1(hwmgr); + else if (hwmgr->pp_table_version == PP_TABLE_V0) + smu7_setup_dpm_tables_v0(hwmgr); + + smu7_setup_default_pcie_table(hwmgr); + + /* save a copy of the default DPM table */ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), + sizeof(struct smu7_dpm_table)); + return 0; +} + +uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr) +{ + uint32_t reference_clock, tmp; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); + + if (tmp) + return TCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + reference_clock = mode_info.ref_clock; + + tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); + + if (0 != tmp) + return reference_clock / 4; + + return reference_clock; +} + +static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) +{ + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableVRHotGPIOInterrupt); + + return 0; +} + +static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + SCLK_PWRMGT_OFF, 0); + return 0; +} + +static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); + + return 0; +} + +static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + +static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) + PP_ASSERT_WITH_CODE(false, + "Attempt to enable Master Deep Sleep switch failed!", + return -EINVAL); + } else { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + } + + return 0; +} + +static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + } + + return 0; +} + +static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t soft_register_value = 0; + uint32_t handshake_disables_offset = data->soft_regs_start + + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, HandshakeDisables); + + soft_register_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, handshake_disables_offset); + soft_register_value |= 
smum_get_mac_definition(hwmgr->smumgr, + SMU_UVD_MCLK_HANDSHAKE_DISABLE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + handshake_disables_offset, soft_register_value); + return 0; +} + +static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* enable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -EINVAL); + + /* enable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) + smu7_disable_handshake_uvd(hwmgr); + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -EINVAL); + + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); + udelay(10); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); + } + + return 0; +} + +static int smu7_start_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /*enable general power management */ + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 1); + + /* enable sclk deep sleep */ + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 1); + + /* prepare for PCIE DPM */ + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + + smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters, + VoltageChangeTimeout), 0x1000); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, + SWRST_COMMAND_1, RESETLC, 0x0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -EINVAL); + + + if (smu7_enable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); + return -EINVAL; + } + + /* enable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM during DPM Start Function!", + return -EINVAL); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableACDCGPIOInterrupt)), + "Failed to enable AC DC GPIO Interrupt!", + ); + } + + return 0; +} + +static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -EINVAL); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + 
"Failed to disable MCLK DPM!", + return -EINVAL); + } + + return 0; +} + +static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable pcie DPM during DPM Stop Function!", + return -EINVAL); + } + + if (smu7_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -EINVAL; + } + + return 0; +} + +static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) +{ + bool protection; + enum DPM_EVENT_SRC src; + + switch (sources) { + default: + printk(KERN_ERR "Unknown throttling event sources."); + /* fall through */ + case 0: + protection = false; + /* src is unused */ + break; + case (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL; + break; + case (1 << PHM_AutoThrottleSource_External): + protection = true; + src = DPM_EVENT_SRC_EXTERNAL; + break; + case (1 << PHM_AutoThrottleSource_External) | + (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; + break; + } + /* Order matters - don't enable thermal protection for the wrong source. */ + if (protection) { + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, + DPM_EVENT_SRC, src); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, + !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)); + } else + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, 1); +} + +static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (!(data->active_auto_throttle_sources & (1 << source))) { + data->active_auto_throttle_sources |= 1 << source; + smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + data->pcie_performance_request = true; + + return 0; +} + +int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result = 0; + int result = 0; + + tmp_result = (!smum_is_dpm_running(hwmgr)) ? 
0 : -1;
+	PP_ASSERT_WITH_CODE(tmp_result == 0,
+			"DPM is already running right now, no need to enable DPM!",
+			return 0);
+
+	if (smu7_voltage_control(hwmgr)) {
+		tmp_result = smu7_enable_voltage_control(hwmgr);
+		PP_ASSERT_WITH_CODE(tmp_result == 0,
+				"Failed to enable voltage control!",
+				result = tmp_result);
+
+		tmp_result = smu7_construct_voltage_tables(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to construct voltage tables!",
+				result = tmp_result);
+	}
+	smum_initialize_mc_reg_table(hwmgr);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalController))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program static screen threshold parameters!",
+			result = tmp_result);
+
+	tmp_result = smu7_enable_display_gap(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable display gap!", result = tmp_result);
+
+	tmp_result = smu7_program_voting_clients(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program voting clients!", result = tmp_result);
+
+	tmp_result = smum_process_firmware_header(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to process firmware header!", result = tmp_result);
+
+	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize switch from ArbF0 to F1!",
+			result = tmp_result);
+
+	result = smu7_setup_default_dpm_tables(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to setup default DPM tables!", return result);
+
+	tmp_result = smum_init_smc_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize SMC table!", result = tmp_result);
+
+	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
+	tmp_result = smu7_enable_sclk_control(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable SCLK control!", result = tmp_result);
+
+	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable voltage control!", result = tmp_result);
+
+	tmp_result = smu7_enable_ulv(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable ULV!", result = tmp_result);
+
+	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable deep sleep master switch!", result = tmp_result);
+
+	tmp_result = smu7_enable_didt_config(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to enable DIDT config!", result = tmp_result);
+
+	tmp_result = smu7_start_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to start DPM!", result = tmp_result);
+
+	tmp_result = smu7_enable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable SMC CAC!", result = tmp_result);
+
+	tmp_result = smu7_enable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable power containment!", result = tmp_result);
+
+	tmp_result =
smu7_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to power control set level!", result = tmp_result); + + tmp_result = smu7_enable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable thermal auto throttle!", result = tmp_result); + + tmp_result = smu7_pcie_performance_request(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "pcie performance request failed!", result = tmp_result); + + return 0; +} + +int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = smu7_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = smu7_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = smu7_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = smu7_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = smu7_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = smu7_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = smu7_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; +} + +int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr) +{ + + return 0; +} + +static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + data->dll_default_on = false; + data->mclk_dpm0_activity_target = 0xa; + data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT; + data->vddc_vddgfx_delta = 300; + data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; + data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; + data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; + 
data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; + + data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; + data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; + data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; + /* need to set voltage control types before EVV patching */ + data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; + data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; + data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; + data->enable_tdc_limit_feature = true; + data->enable_pkg_pwr_tracking_feature = true; + data->force_pcie_gen = PP_PCIEGenInvalid; + data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; + + data->fast_watermark_threshold = 100; + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) + data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDGFX)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { + data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + } + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) + data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) + data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDGFX); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) + data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + + if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + if ((hwmgr->pp_table_version != PP_TABLE_V0) + && (table_info->cac_dtp_table->usClockStretchAmount != 0)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; +} + +/** +* Get Leakage VDDC based on leakage ID. +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @return always 0 +*/ +static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint16_t vv_id; + uint16_t vddc = 0; + uint16_t vddgfx = 0; + uint16_t i, j; + uint32_t sclk = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; + + + if (table_info != NULL) + sclk_table = table_info->vdd_dep_on_sclk; + + for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { + vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + if (0 == phm_get_sclk_for_voltage_evv(hwmgr, + table_info->vddgfx_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < sclk_table->count; j++) { + if (sclk_table->entries[j].clk == sclk && + sclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + if (0 == atomctrl_get_voltage_evv_on_sclk + (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, + vv_id, &vddgfx)) { + /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */ + PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddgfx != 0 && vddgfx != vv_id) { + data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; + data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; + data->vddcgfx_leakage.count++; + } + } else { + printk("Error retrieving EVV voltage value!\n"); + } + } + } else { + + if ((hwmgr->pp_table_version == PP_TABLE_V0) + || !phm_get_sclk_for_voltage_evv(hwmgr, + table_info->vddc_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < sclk_table->count; j++) { + if (sclk_table->entries[j].clk == sclk && + sclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + + if (phm_get_voltage_evv_on_sclk(hwmgr, + VOLTAGE_TYPE_VDDC, + sclk, vv_id, &vddc) == 0) { + if (vddc >= 2000 || vddc == 0) + return -EINVAL; + } else { + printk(KERN_WARNING "failed to retrieving EVV voltage!\n"); + continue; + } + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddc != 0 && vddc != vv_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; + data->vddc_leakage.count++; + } + } + } + } + + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); +} + +/** +* Patch voltage lookup table by EVV leakages. 
+* +* @param hwmgr the address of the powerplay hardware manager. +* @param pointer to voltage lookup table +* @param pointer to leakage table +* @return always 0 +*/ +static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + struct smu7_leakage_voltage *leakage_table) +{ + uint32_t i; + + for (i = 0; i < lookup_table->count; i++) + smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, + &lookup_table->entries[i].us_vdd, leakage_table); + + return 0; +} + +static int smu7_patch_clock_voltage_limits_with_vddc_leakage( + struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, + uint16_t *vddc) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + table_info->max_clock_voltage_on_dc.vddc; + return 0; +} + +static int smu7_patch_voltage_dependency_tables_with_lookup_table( + struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + uint8_t voltage_id; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { + voltage_id = sclk_table->entries[entry_id].vddInd; + sclk_table->entries[entry_id].vddgfx = + table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; + } + } else { + for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { + voltage_id = sclk_table->entries[entry_id].vddInd; + sclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + } + + for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { + voltage_id = mclk_table->entries[entry_id].vddInd; + mclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { + voltage_id = mm_table->entries[entry_id].vddcInd; + mm_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + return 0; + +} + +static int phm_add_voltage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *look_up_table, + phm_ppt_v1_voltage_lookup_record *record) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != look_up_table), + "Lookup Table empty.", return -EINVAL); + PP_ASSERT_WITH_CODE((0 != look_up_table->count), + "Lookup Table empty.", return -EINVAL); + + i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + PP_ASSERT_WITH_CODE((i >= look_up_table->count), + "Lookup Table is full.", return -EINVAL); + + /* This is to avoid entering duplicate calculated records. 
*/ + for (i = 0; i < look_up_table->count; i++) { + if (look_up_table->entries[i].us_vdd == record->us_vdd) { + if (look_up_table->entries[i].us_calculated == 1) + return 0; + break; + } + } + + look_up_table->entries[i].us_calculated = 1; + look_up_table->entries[i].us_vdd = record->us_vdd; + look_up_table->entries[i].us_cac_low = record->us_cac_low; + look_up_table->entries[i].us_cac_mid = record->us_cac_mid; + look_up_table->entries[i].us_cac_high = record->us_cac_high; + /* Only increment the count when we're appending, not replacing duplicate entry. */ + if (i == look_up_table->count) + look_up_table->count++; + + return 0; +} + + +static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + struct phm_ppt_v1_voltage_lookup_record v_record; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { + if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) + v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + + sclk_table->entries[entry_id].vdd_offset - 0xFFFF; + else + v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + + sclk_table->entries[entry_id].vdd_offset; + + sclk_table->entries[entry_id].vddc = + v_record.us_cac_low = v_record.us_cac_mid = + v_record.us_cac_high = v_record.us_vdd; + + phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); + } + + for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { + if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) + v_record.us_vdd = mclk_table->entries[entry_id].vddc + + mclk_table->entries[entry_id].vdd_offset - 0xFFFF; + else + v_record.us_vdd = mclk_table->entries[entry_id].vddc + + mclk_table->entries[entry_id].vdd_offset; + + mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = + v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; + phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); + } + } + return 0; +} + +static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + struct phm_ppt_v1_voltage_lookup_record v_record; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + for (entry_id = 0; entry_id < mm_table->count; entry_id++) { + if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) + v_record.us_vdd = mm_table->entries[entry_id].vddc + + mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; + else + v_record.us_vdd = mm_table->entries[entry_id].vddc + + mm_table->entries[entry_id].vddgfx_offset; + + /* Add the calculated VDDGFX to the VDDGFX lookup table */ + mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = + v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; + phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); + } + } + return 0; +} + +static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + uint32_t 
table_size, i, j;
+	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+	table_size = lookup_table->count;
+
+	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+		"Lookup table is empty", return -EINVAL);
+
+	/* Sorting voltages */
+	for (i = 0; i < table_size - 1; i++) {
+		for (j = i + 1; j > 0; j--) {
+			if (lookup_table->entries[j].us_vdd <
+					lookup_table->entries[j - 1].us_vdd) {
+				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+				lookup_table->entries[j - 1] = lookup_table->entries[j];
+				lookup_table->entries[j] = tmp_voltage_lookup_record;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	int tmp_result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+			table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
+		if (tmp_result != 0)
+			result = tmp_result;
+
+		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+			&table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
+	} else {
+
+		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+				table_info->vddc_lookup_table, &(data->vddc_leakage));
+		if (tmp_result)
+			result = tmp_result;
+
+		tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+				&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+		if (tmp_result)
+			result = tmp_result;
+	}
+
+	tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+	if (tmp_result)
+		result = tmp_result;
+
+	return result;
+}
+
+static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+			table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+			table_info->vdd_dep_on_mclk;
+
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+		"VDD dependency on SCLK table is missing.",
+		return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+		"VDD dependency on SCLK table must have at least one entry.",
+		return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+		"VDD dependency on MCLK table is missing",
+		return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+		"VDD dependency on MCLK table must have at least one entry.",
+		return -EINVAL);
+
+	table_info->max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+	table_info->max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+	table_info->max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+	table_info->max_clock_voltage_on_ac.vddci =
+
allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; + + return 0; +} + +int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; + struct phm_ppt_v1_voltage_lookup_table *lookup_table; + uint32_t i; + uint32_t hw_revision, sub_vendor_id, sub_sys_id; + struct cgs_system_info sys_info = {0}; + + if (table_info != NULL) { + dep_mclk_table = table_info->vdd_dep_on_mclk; + lookup_table = table_info->vddc_lookup_table; + } else + return 0; + + sys_info.size = sizeof(struct cgs_system_info); + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(hwmgr->device, &sys_info); + hw_revision = (uint32_t)sys_info.value; + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID; + cgs_query_system_info(hwmgr->device, &sys_info); + sub_sys_id = (uint32_t)sys_info.value; + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID; + cgs_query_system_info(hwmgr->device, &sys_info); + sub_vendor_id = (uint32_t)sys_info.value; + + if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && + ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || + (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || + (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { + if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) + return 0; + + for (i = 0; i < lookup_table->count; i++) { + if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { + dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; + return 0; + } + } + } + return 0; +} + +static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) +{ + struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + uint32_t temp_reg; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { + temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); + switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { + case 0: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); + break; + case 1: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); + break; + case 2: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); + break; + case 3: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); + break; + case 4: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); + break; + default: + PP_ASSERT_WITH_CODE(0, + "Failed to setup PCC HW register! 
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", + ); + break; + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); + } + + if (table_info == NULL) + return 0; + + if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && + hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { + hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = + (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = + (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = + (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; + + table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? + (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; + + table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; + table_info->cac_dtp_table->usOperatingTempStep = 1; + table_info->cac_dtp_table->usOperatingTempHyst = 1; + + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; + + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; + + hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = + table_info->cac_dtp_table->usOperatingTempMinLimit; + + hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = + table_info->cac_dtp_table->usOperatingTempMaxLimit; + + hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = + table_info->cac_dtp_table->usDefaultTargetOperatingTemp; + + hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = + table_info->cac_dtp_table->usOperatingTempStep; + + hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = + table_info->cac_dtp_table->usTargetOperatingTemp; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport); + } + + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); +} + + +static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddci_leakage); + + return 0; +} + +static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, + struct phm_vce_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + + +static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, + struct phm_uvd_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, + struct phm_phase_shedding_limits_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, + struct phm_samu_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, + struct phm_acp_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *tab) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) { + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc, + &data->vddc_leakage); + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci, + &data->vddci_leakage); + } + + return 0; +} + +static int 
smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) +{ + uint32_t i; + uint32_t vddc; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) { + for (i = 0; i < tab->count; i++) { + vddc = (uint32_t)(tab->entries[i].Vddc); + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); + tab->entries[i].Vddc = (uint16_t)vddc; + } + } + + return 0; +} + +static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) +{ + int tmp; + + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); + if (tmp) + return -EINVAL; + + return 0; +} + + +static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, + "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, + "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, + "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, + "VDD dependency on MCLK table has to have is missing. 
This table is mandatory\n", return -EINVAL); + + data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; + data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { + data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; + data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + } + + if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; + + return 0; +} + +int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data; + int result; + + data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + smu7_patch_voltage_workaround(hwmgr); + smu7_init_dpm_defaults(hwmgr); + + /* Get leakage voltage based on leakage ID. */ + result = smu7_get_evv_voltages(hwmgr); + + if (result) { + printk("Get EVV Voltage Failed. Abort Driver loading!\n"); + return -EINVAL; + } + + if (hwmgr->pp_table_version == PP_TABLE_V1) { + smu7_complete_dependency_tables(hwmgr); + smu7_set_private_data_based_on_pptable_v1(hwmgr); + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { + smu7_patch_dependency_tables_with_leakage(hwmgr); + smu7_set_private_data_based_on_pptable_v0(hwmgr); + } + + /* Initalize Dynamic State Adjustment Rule Settings */ + result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + + if (0 == result) { + struct cgs_system_info sys_info = {0}; + + data->is_tlu_enabled = false; + + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + SMU7_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; + else + data->pcie_lane_cap = (uint32_t)sys_info.value; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ +/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + smu7_thermal_parameter_init(hwmgr); + } else { + /* Ignore return value in here, we are cleaning up a mess. 
*/ + phm_hwmgr_backend_fini(hwmgr); + } + + return 0; +} + +static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t level, tmp; + + if (!data->pcie_dpm_key_disabled) { + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, level); + } + } + + if (!data->sclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + return 0; +} + +static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (hwmgr->pp_table_version == PP_TABLE_V1) + phm_apply_dal_min_voltage_request(hwmgr); +/* TO DO for v0 iceland and Ci*/ + + if (!data->sclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + } + + return 0; +} + +static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (!smum_is_dpm_running(hwmgr)) + return -EINVAL; + + if (!data->pcie_dpm_key_disabled) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_UnForceLevel); + } + + return smu7_upload_dpm_level_enable_mask(hwmgr); +} + +static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = + (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t level; + + if (!data->sclk_dpm_key_disabled) + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = phm_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + level = phm_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->pcie_dpm_key_disabled) { + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { + level = phm_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.pcie_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (level)); + } + } + + return 0; + +} +static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + 
case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu7_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu7_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = smu7_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + + return ret; +} + +static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct smu7_power_state); +} + + +static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *request_ps, + const struct pp_power_state *current_ps) +{ + + struct smu7_power_state *smu7_ps = + cast_phw_smu7_power_state(&request_ps->hardware); + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, + "VI should always have 2 performance levels", + ); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + /* Cap clock DPM tables at DC MAX if it is in DC. */ + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < smu7_ps->performance_level_count; i++) { + if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) + smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) + smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; + smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ + + minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; + minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = table_info->vdd_dep_on_sclk->count - 1; + count >= 0; count--) { + if (stable_pstate_sclk >= + table_info->vdd_dep_on_sclk->entries[count].clk) { + stable_pstate_sclk = + table_info->vdd_dep_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + 
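+ /* Annotation (not in the original patch): clamp a requested SCLK overdrive to the platform overdrive limit, then apply it to the high performance level when it is at least the arbitrated SCLK. */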
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + smu7_ps->performance_levels[1].engine_clock = + hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + smu7_ps->performance_levels[1].memory_clock = + hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + + + disable_mclk_switching = (1 < info.display_count) || + disable_mclk_switching_for_frame_lock; + + sclk = smu7_ps->performance_levels[0].engine_clock; + mclk = smu7_ps->performance_levels[0].memory_clock; + + if (disable_mclk_switching) + mclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].memory_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? + max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? + max_limits->mclk : minimum_clocks.memoryClock; + + smu7_ps->performance_levels[0].engine_clock = sclk; + smu7_ps->performance_levels[0].memory_clock = mclk; + + smu7_ps->performance_levels[1].engine_clock = + (smu7_ps->performance_levels[1].engine_clock >= + smu7_ps->performance_levels[0].engine_clock) ? 
+ smu7_ps->performance_levels[1].engine_clock : + smu7_ps->performance_levels[0].engine_clock; + + if (disable_mclk_switching) { + if (mclk < smu7_ps->performance_levels[1].memory_clock) + mclk = smu7_ps->performance_levels[1].memory_clock; + + smu7_ps->performance_levels[0].memory_clock = mclk; + smu7_ps->performance_levels[1].memory_clock = mclk; + } else { + if (smu7_ps->performance_levels[1].memory_clock < + smu7_ps->performance_levels[0].memory_clock) + smu7_ps->performance_levels[1].memory_clock = + smu7_ps->performance_levels[0].memory_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + for (i = 0; i < smu7_ps->performance_level_count; i++) { + smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; + smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; + smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; + smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; + } + } + return 0; +} + + +static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + if (low) + return smu7_ps->performance_levels[0].memory_clock; + else + return smu7_ps->performance_levels + [smu7_ps->performance_level_count-1].memory_clock; +} + +static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + if (low) + return smu7_ps->performance_levels[0].engine_clock; + else + return smu7_ps->performance_levels + [smu7_ps->performance_level_count-1].engine_clock; +} + +static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. 
*/ + data->vbios_boot_state.sclk_bootup_value = + le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = + le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = + le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = + le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = + le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = + smu7_get_current_pcie_speed(hwmgr); + + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned long ret = 0; + + if (hwmgr->pp_table_version == PP_TABLE_V0) { + result = pp_tables_get_num_of_entries(hwmgr, &ret); + return result ? 0 : ret; + } else if (hwmgr->pp_table_version == PP_TABLE_V1) { + result = get_number_of_powerplay_table_entries_v1_0(hwmgr); + return result; + } + return 0; +} + +static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, + void *state, struct pp_power_state *power_state, + void *pp_table, uint32_t classification_flag) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_power_state *smu7_power_state = + (struct smu7_power_state *)(&(power_state->hardware)); + struct smu7_performance_level *performance_level; + ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; + ATOM_Tonga_POWERPLAYTABLE *powerplay_table = + (ATOM_Tonga_POWERPLAYTABLE *)pp_table; + PPTable_Generic_SubTable_Header *sclk_dep_table = + (PPTable_Generic_SubTable_Header *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + + ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = + (ATOM_Tonga_MCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + + /* The following fields are not initialized here: id orderedList allStatesList */ + power_state->classification.ui_label = + (le16_to_cpu(state_entry->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> + ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + power_state->classification.flags = classification_flag; + /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ + + power_state->classification.temporary_state = false; + power_state->classification.to_be_deleted = false; + + power_state->validation.disallowOnDC = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Tonga_DISALLOW_ON_DC)); + + power_state->pcie.lanes = 0; + + power_state->display.disableFrameModulation = false; + power_state->display.limitRefreshrate = false; + power_state->display.enableVariBright = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Tonga_ENABLE_VARIBRIGHT)); + + power_state->validation.supportedPowerLevels = 0; + power_state->uvd_clocks.VCLK = 0; + power_state->uvd_clocks.DCLK = 0; + power_state->temperatures.min = 0; + power_state->temperatures.max = 0; + + performance_level = &(smu7_power_state->performance_levels 
+ [smu7_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + "Performance levels exceeds SMC limit!", + return -EINVAL); + + PP_ASSERT_WITH_CODE( + (smu7_power_state->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -EINVAL); + + /* Performance levels are arranged from low to high. */ + performance_level->memory_clock = mclk_dep_table->entries + [state_entry->ucMemoryClockIndexLow].ulMclk; + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, + state_entry->ucPCIEGenLow); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + performance_level = &(smu7_power_state->performance_levels + [smu7_power_state->performance_level_count++]); + performance_level->memory_clock = mclk_dep_table->entries + [state_entry->ucMemoryClockIndexHigh].ulMclk; + + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, + state_entry->ucPCIEGenHigh); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + return 0; +} + +static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct smu7_power_state *ps; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct smu7_power_state *)(&state->hardware); + + result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, + smu7_get_pp_table_entry_callback_func_v1); + + /* This is the earliest time we have all the dependency table and the VBIOS boot state + * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state + * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].vddci != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if 
(state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *power_state, + unsigned int index, const void *clock_info) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); + const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; + struct smu7_performance_level *performance_level; + uint32_t engine_clock, memory_clock; + uint16_t pcie_gen_from_bios; + + engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; + memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; + + if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) + data->highest_mclk = memory_clock; + + performance_level = &(ps->performance_levels + [ps->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + "Performance levels exceeds SMC limit!", + return -EINVAL); + + PP_ASSERT_WITH_CODE( + (ps->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -EINVAL); + + /* Performance levels are arranged from low to high. 
*/ + performance_level->memory_clock = memory_clock; + performance_level->engine_clock = engine_clock; + + pcie_gen_from_bios = visland_clk_info->ucPCIEGen; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); + + return 0; +} + +static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct smu7_power_state *ps; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *dep_mclk_table = + hwmgr->dyn_state.vddci_dependency_on_mclk; + + memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct smu7_power_state *)(&state->hardware); + + result = pp_tables_get_entry(hwmgr, entry_index, state, + smu7_get_pp_table_entry_callback_func_v0); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state as + * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot + * state if there is only one VDDCI/MCLK level, check if it's + * the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].v != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if (state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > 
+ ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + if (hwmgr->pp_table_version == PP_TABLE_V0) + return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); + else if (hwmgr->pp_table_version == PP_TABLE_V1) + return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); + + return 0; +} + +static void +smu7_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", + mclk / 100, sclk / 100); + + offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + AverageGraphicsActivity); + + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + + seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); + + seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); + + seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en"); +} + +static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct smu7_power_state *smu7_ps = + cast_const_phw_smu7_power_state(states->pnew_state); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + uint32_t sclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].engine_clock; + struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + uint32_t mclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].memory_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_smu7_dpm_table = 0; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* TODO: Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. 
+ */ + if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && + (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || + data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, + const struct smu7_power_state *smu7_ps) +{ + uint32_t i; + uint32_t sclk, max_sclk = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + + for (i = 0; i < smu7_ps->performance_level_count; i++) { + sclk = smu7_ps->performance_levels[i].engine_clock; + if (max_sclk < sclk) + max_sclk = sclk; + } + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) + return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? + dpm_table->pcie_speed_table.dpm_levels + [dpm_table->pcie_speed_table.count - 1].value : + dpm_table->pcie_speed_table.dpm_levels[i].value); + } + + return 0; +} + +static int smu7_request_link_speed_change_before_state_change( + struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct smu7_power_state *smu7_nps = + cast_const_phw_smu7_power_state(states->pnew_state); + const struct smu7_power_state *polaris10_cps = + cast_const_phw_smu7_power_state(states->pcurrent_state); + + uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + + if (target_link_speed > current_link_speed) { + switch (target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -EINVAL); + } + + 
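+ /* Annotation (not in the original patch): unlike the SCLK branch above, MCLK DPM is frozen only when the top overdrive memory level changed (DPMTABLE_OD_UPDATE_MCLK); a plain DPMTABLE_UPDATE_MCLK does not trigger a freeze here. */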
if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -EINVAL); + } + + return 0; +} + +static int smu7_populate_and_upload_sclk_mclk_dpm_levels( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct smu7_power_state *smu7_ps = + cast_const_phw_smu7_power_state(states->pnew_state); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t sclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].engine_clock; + uint32_t mclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].memory_clock; + struct smu7_dpm_table *dpm_table = &data->dpm_table; + + struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + dpm_table->sclk_table.dpm_levels + [dpm_table->sclk_table.count - 1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count - 1].value != 0), + "Divide by 0!", + return -EINVAL); + dpm_count = dpm_table->sclk_table.count < 2 ? 
0 : dpm_table->sclk_table.count - 2; + + for (i = dpm_count; i > 1; i--) { + if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) { + clock_percent = + ((sclk + - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value + ) * 100) + / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; + + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value + + (golden_dpm_table->sclk_table.dpm_levels[i].value * + clock_percent)/100; + + } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) { + clock_percent = + ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value + - sclk) * 100) + / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; + + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value - + (golden_dpm_table->sclk_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + dpm_table->mclk_table.dpm_levels + [dpm_table->mclk_table.count - 1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -EINVAL); + dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) { + clock_percent = ((mclk - + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100) + / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; + + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value + + (golden_dpm_table->mclk_table.dpm_levels[i].value * + clock_percent) / 100; + + } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ( + (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk) + * 100) + / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; + + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value - + (golden_dpm_table->mclk_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = smum_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = smum_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + return result; +} + +static 
int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct smu7_single_dpm_table *dpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) + || (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + + return 0; +} + +static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, + const struct smu7_power_state *smu7_ps) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), + "power state did not have any performance level", + return -EINVAL); + + high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; + + smu7_trim_single_dpm_states(hwmgr, + &(data->dpm_table.sclk_table), + smu7_ps->performance_levels[0].engine_clock, + smu7_ps->performance_levels[high_limit_count].engine_clock); + + smu7_trim_single_dpm_states(hwmgr, + &(data->dpm_table.mclk_table), + smu7_ps->performance_levels[0].memory_clock, + smu7_ps->performance_levels[high_limit_count].memory_clock); + + return 0; +} + +static int smu7_generate_dpm_level_enable_mask( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct smu7_power_state *smu7_ps = + cast_const_phw_smu7_power_state(states->pnew_state); + + result = smu7_trim_dpm_states(hwmgr, smu7_ps); + if (result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -EINVAL); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -EINVAL); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + +static int smu7_notify_link_speed_change_after_state_change( + struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct smu7_power_state *smu7_ps = + 
cast_const_phw_smu7_power_state(states->pnew_state); + uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); + uint8_t request; + + if (data->pspp_notify_required) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if (request == PCIE_PERF_REQ_GEN1 && + smu7_get_current_pcie_speed(hwmgr) > 0) + return 0; + + if (acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + return 0; +} + +static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; +} + +static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int tmp_result, result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to find DPM states clocks in DPM table!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = + smu7_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to request link speed change before state change!", + result = tmp_result); + } + + tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate and upload SCLK MCLK DPM levels!", + result = tmp_result); + + tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to generate DPM level enabled mask!", + result = tmp_result); + + tmp_result = smum_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to update SCLK threshold!", + result = tmp_result); + + tmp_result = smu7_notify_smc_display(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify smc display settings!", + result = tmp_result); + + tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to unfreeze SCLK MCLK DPM!", + result = tmp_result); + + tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to upload DPM level enabled mask!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = + smu7_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify link speed change after state change!", + result = tmp_result); + } + data->apply_optimized_settings = false; + return result; +} + +static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller. 
+ advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); +} + +int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) +{ + PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; + + return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; +} + +int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + struct cgs_display_info info = {0}; + + info.mode_info = NULL; + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_displays = info.display_count; + + if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true) + smu7_notify_smc_display_change(hwmgr, false); + + return 0; +} + +/** +* Programs the display gap +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always OK +*/ +int smu7_program_display_gap(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if (0 == refresh_rate) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + data->frame_time_x2 = frame_time_in_us * 2 / 100; + + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + PreVBlankGap), 0x64); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + VBlankTimeout), + (frame_time_in_us - pre_vbi_time_in_us)); + + return 0; +} + +int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + return smu7_program_display_gap(hwmgr); +} + +/** +* Set maximum target operating fan output RPM +* +* @param hwmgr: the address of the powerplay hardware manager. +* @param usMaxFanRpm: max operating fan RPM value. +* @return The response that came from the SMC. +*/ +static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) +{ + hwmgr->thermal_controller. 
+ advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); +} + +int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + return 0; +} + +bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0, 0, NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr && + (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || + hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) + is_update_required = true; + } + return is_update_required; +} + +static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, + const struct smu7_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1); + const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2); + int i; + + if (pstate1 == NULL || pstate2 == NULL || equal == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); + *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + + return 0; +} + +int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t vbios_version; + uint32_t tmp; + + /* Read MC indirect register offset 0x9F bits [3:0] to see + * if VBIOS has already loaded a full version of MC ucode + * or not. 
+ */ + + smu7_get_mc_microcode_version(hwmgr); + vbios_version = hwmgr->microcode_version_info.MC & 0xf; + + data->need_long_memory_training = false; + + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, + ixMC_IO_DEBUG_UP_13); + tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + if (tmp & (1 << 23)) { + data->mem_latency_high = MEM_LATENCY_HIGH; + data->mem_latency_low = MEM_LATENCY_LOW; + } else { + data->mem_latency_high = 330; + data->mem_latency_low = 330; + } + + return 0; +} + +static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); + data->clock_registers.vDLL_CNTL = + cgs_read_register(hwmgr->device, mmDLL_CNTL); + data->clock_registers.vMCLK_PWRMGT_CNTL = + cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); + data->clock_registers.vMPLL_AD_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); + data->clock_registers.vMPLL_DQ_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL_1 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); + data->clock_registers.vMPLL_FUNC_CNTL_2 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); + data->clock_registers.vMPLL_SS1 = + cgs_read_register(hwmgr->device, mmMPLL_SS1); + data->clock_registers.vMPLL_SS2 = + cgs_read_register(hwmgr->device, mmMPLL_SS2); + return 0; + +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Initialize PowerGating States for different engines + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + + return 0; +} + +static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + return 0; +} + +int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + smu7_upload_mc_firmware(hwmgr); + + tmp_result = smu7_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = smu7_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = smu7_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = smu7_init_power_gate_state(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init power gate state!", result = tmp_result); + + tmp_result = smu7_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = smu7_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + case PP_PCIE: + { + uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? 
"*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = smu7_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); + smu7_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct smu7_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct smu7_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t 
value) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + + +static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + int i; + + if (table_info == NULL) + return -EINVAL; + + dep_sclk_table = table_info->vdd_dep_on_sclk; + + for (i = 0; i < dep_sclk_table->count; i++) { + clocks->clock[i] = dep_sclk_table->entries[i].clk; + clocks->count++; + } + return 0; +} + +static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY) + return data->mem_latency_high; + else if (clk >= MEM_FREQ_HIGH_LATENCY) + return data->mem_latency_low; + else + return MEM_LATENCY_ERR; +} + +static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; + int i; + + if (table_info == NULL) + return -EINVAL; + + dep_mclk_table = table_info->vdd_dep_on_mclk; + + for (i = 0; i < dep_mclk_table->count; i++) { + clocks->clock[i] = dep_mclk_table->entries[i].clk; + clocks->latency[i] = smu7_get_mem_latency(hwmgr, + dep_mclk_table->entries[i].clk); + clocks->count++; + } + return 0; +} + +static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + switch (type) { + case amd_pp_sys_clock: + smu7_get_sclks(hwmgr, clocks); + break; + case amd_pp_mem_clock: + smu7_get_mclks(hwmgr, clocks); + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct pp_hwmgr_func smu7_hwmgr_funcs = { + .backend_init = &smu7_hwmgr_backend_init, + .backend_fini = &phm_hwmgr_backend_fini, + .asic_setup = &smu7_setup_asic_task, + .dynamic_state_management_enable = &smu7_enable_dpm_tasks, + .apply_state_adjust_rules = smu7_apply_state_adjust_rules, + .force_dpm_level = &smu7_force_dpm_level, + .power_state_set = smu7_set_power_state_tasks, + .get_power_state_size = smu7_get_power_state_size, + .get_mclk = smu7_dpm_get_mclk, + .get_sclk = smu7_dpm_get_sclk, + .patch_boot_state = smu7_dpm_patch_boot_state, + .get_pp_table_entry = smu7_get_pp_table_entry, + .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, + .print_current_perforce_level = smu7_print_current_perforce_level, + .powerdown_uvd = smu7_powerdown_uvd, + .powergate_uvd = smu7_powergate_uvd, + .powergate_vce = smu7_powergate_vce, + .disable_clock_power_gating = smu7_disable_clock_power_gating, + .update_clock_gatings = smu7_update_clock_gatings, + .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = 
smu7_display_configuration_changed_task, + .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output, + .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output, + .get_temperature = smu7_thermal_get_temperature, + .stop_thermal_controller = smu7_thermal_stop_thermal_controller, + .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt, + .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration, + .check_states_equal = smu7_check_states_equal, + .set_fan_control_mode = smu7_set_fan_control_mode, + .get_fan_control_mode = smu7_get_fan_control_mode, + .force_clock_level = smu7_force_clock_level, + .print_clock_levels = smu7_print_clock_levels, + .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating, + .get_sclk_od = smu7_get_sclk_od, + .set_sclk_od = smu7_set_sclk_od, + .get_mclk_od = smu7_get_mclk_od, + .set_mclk_od = smu7_set_mclk_od, + .get_clock_by_type = smu7_get_clock_by_type, +}; + +uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); + + PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); + for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + temp = clock >> i; + + if (temp >= min || i == 0) + break; + } + return i; +} + +int smu7_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + hwmgr->hwmgr_func = &smu7_hwmgr_funcs; + if (hwmgr->pp_table_version == PP_TABLE_V0) + hwmgr->pptable_func = &pptable_funcs; + else if (hwmgr->pp_table_version == PP_TABLE_V1) + hwmgr->pptable_func = &pptable_v1_0_funcs; + + pp_smu7_thermal_initialize(hwmgr); + return ret; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h new file mode 100644 index 000000000000..27e7f76ad8a6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -0,0 +1,353 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _SMU7_HWMGR_H +#define _SMU7_HWMGR_H + +#include "hwmgr.h" +#include "ppatomctrl.h" + +#define SMU7_MAX_HARDWARE_POWERLEVELS 2 + +#define SMU7_VOLTAGE_CONTROL_NONE 0x0 +#define SMU7_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define SMU7_VOLTAGE_CONTROL_MERGED 0x3 + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +enum gpu_pt_config_reg_type { + GPU_CONFIGREG_MMR = 0, + GPU_CONFIGREG_SMC_IND, + GPU_CONFIGREG_DIDT_IND, + GPU_CONFIGREG_GC_CAC_IND, + GPU_CONFIGREG_CACHE, + GPU_CONFIGREG_MAX +}; + +struct gpu_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum gpu_pt_config_reg_type type; +}; + +struct smu7_performance_level { + uint32_t memory_clock; + uint32_t engine_clock; + uint16_t pcie_gen; + uint16_t pcie_lane; +}; + +struct smu7_thermal_temperature_setting { + long temperature_low; + long temperature_high; + long temperature_shutdown; +}; + +struct smu7_uvd_clocks { + uint32_t vclk; + uint32_t dclk; +}; + +struct smu7_vce_clocks { + uint32_t evclk; + uint32_t ecclk; +}; + +struct smu7_power_state { + uint32_t magic; + struct smu7_uvd_clocks uvd_clks; + struct smu7_vce_clocks vce_clks; + uint32_t sam_clk; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct smu7_performance_level performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS]; +}; + +struct smu7_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define SMU7_MINIMUM_ENGINE_CLOCK 2500 + +struct smu7_single_dpm_table { + uint32_t count; + struct smu7_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct smu7_dpm_table { + struct smu7_single_dpm_table sclk_table; + struct smu7_single_dpm_table mclk_table; + struct smu7_single_dpm_table pcie_speed_table; + struct smu7_single_dpm_table vddc_table; + struct smu7_single_dpm_table vddci_table; + struct smu7_single_dpm_table mvdd_table; +}; + +struct smu7_clock_registers { + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t vDLL_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_SS1; + uint32_t vMPLL_SS2; +}; + +#define DISABLE_MC_LOADMICROCODE 1 +#define DISABLE_MC_CFGPROGRAMMING 2 + +struct smu7_voltage_smio_registers { + uint32_t vS0_VID_LOWER_SMIO_CNTL; +}; + +#define SMU7_MAX_LEAKAGE_COUNT 8 + +struct smu7_leakage_voltage { + uint16_t count; + uint16_t leakage_id[SMU7_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[SMU7_MAX_LEAKAGE_COUNT]; +}; + +struct smu7_vbios_boot_state { + uint16_t mvdd_bootup_value; + uint16_t vddc_bootup_value; + uint16_t vddci_bootup_value; + uint16_t vddgfx_bootup_value; + uint32_t sclk_bootup_value; + uint32_t mclk_bootup_value; + uint16_t pcie_gen_bootup_value; + uint16_t pcie_lane_bootup_value; +}; + +struct 
smu7_display_timing { + uint32_t min_clock_in_sr; + uint32_t num_existing_displays; +}; + +struct smu7_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; + uint32_t pcie_dpm_enable_mask; +}; + +struct smu7_pcie_perf_range { + uint16_t max; + uint16_t min; +}; + +struct smu7_hwmgr { + struct smu7_dpm_table dpm_table; + struct smu7_dpm_table golden_dpm_table; + + uint32_t voting_rights_clients0; + uint32_t voting_rights_clients1; + uint32_t voting_rights_clients2; + uint32_t voting_rights_clients3; + uint32_t voting_rights_clients4; + uint32_t voting_rights_clients5; + uint32_t voting_rights_clients6; + uint32_t voting_rights_clients7; + uint32_t static_screen_threshold_unit; + uint32_t static_screen_threshold; + uint32_t voltage_control; + uint32_t vdd_gfx_control; + uint32_t vddc_vddgfx_delta; + uint32_t active_auto_throttle_sources; + + struct smu7_clock_registers clock_registers; + + bool is_memory_gddr5; + uint16_t acpi_vddc; + bool pspp_notify_required; + uint16_t force_pcie_gen; + uint16_t acpi_pcie_gen; + uint32_t pcie_gen_cap; + uint32_t pcie_lane_cap; + uint32_t pcie_spc_cap; + struct smu7_leakage_voltage vddc_leakage; + struct smu7_leakage_voltage vddci_leakage; + struct smu7_leakage_voltage vddcgfx_leakage; + + uint32_t mvdd_control; + uint32_t vddc_mask_low; + uint32_t mvdd_mask_low; + uint16_t max_vddc_in_pptable; + uint16_t min_vddc_in_pptable; + uint16_t max_vddci_in_pptable; + uint16_t min_vddci_in_pptable; + bool is_uvd_enabled; + struct smu7_vbios_boot_state vbios_boot_state; + + bool pcie_performance_request; + bool battery_state; + bool is_tlu_enabled; + bool disable_handshake; + bool smc_voltage_control_enabled; + bool vbi_time_out_support; + + uint32_t soft_regs_start; + /* ---- Stuff originally coming from Evergreen ---- */ + uint32_t vddci_control; + struct pp_atomctrl_voltage_table vddc_voltage_table; + struct pp_atomctrl_voltage_table vddci_voltage_table; + struct pp_atomctrl_voltage_table mvdd_voltage_table; + struct pp_atomctrl_voltage_table vddgfx_voltage_table; + + uint32_t mgcg_cgtt_local2; + uint32_t mgcg_cgtt_local3; + uint32_t gpio_debug; + uint32_t mc_micro_code_feature; + uint32_t highest_mclk; + uint16_t acpi_vddci; + uint8_t mvdd_high_index; + uint8_t mvdd_low_index; + bool dll_default_on; + bool performance_request_registered; + + /* ---- Low Power Features ---- */ + bool ulv_supported; + + /* ---- CAC Stuff ---- */ + uint32_t cac_table_start; + bool cac_configuration_required; + bool driver_calculate_cac_leakage; + bool cac_enabled; + + /* ---- DPM2 Parameters ---- */ + uint32_t power_containment_features; + bool enable_dte_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool disable_uvd_power_tune_feature; + + + uint32_t dte_tj_offset; + uint32_t fast_watermark_threshold; + + /* ---- Phase Shedding ---- */ + bool vddc_phase_shed_control; + + /* ---- DI/DT ---- */ + struct smu7_display_timing display_timing; + + /* ---- Thermal Temperature Setting ---- */ + struct smu7_thermal_temperature_setting thermal_temp_setting; + struct smu7_dpmlevel_enable_mask dpm_level_enable_mask; + uint32_t need_update_smu7_dpm_table; + uint32_t sclk_dpm_key_disabled; + uint32_t mclk_dpm_key_disabled; + uint32_t pcie_dpm_key_disabled; + uint32_t min_engine_clocks; + struct smu7_pcie_perf_range pcie_gen_performance; + struct smu7_pcie_perf_range pcie_lane_performance; + 
struct smu7_pcie_perf_range pcie_gen_power_saving; + struct smu7_pcie_perf_range pcie_lane_power_saving; + bool use_pcie_performance_levels; + bool use_pcie_power_saving_levels; + uint32_t mclk_activity_target; + uint32_t mclk_dpm0_activity_target; + uint32_t low_sclk_interrupt_threshold; + uint32_t last_mclk_dpm_enable_mask; + bool uvd_enabled; + + /* ---- Power Gating States ---- */ + bool uvd_power_gated; + bool vce_power_gated; + bool samu_power_gated; + bool need_long_memory_training; + + /* Application power optimization parameters */ + bool update_up_hyst; + bool update_down_hyst; + uint32_t down_hyst; + uint32_t up_hyst; + uint32_t disable_dpm_mask; + bool apply_optimized_settings; + + uint32_t avfs_vdroop_override_setting; + bool apply_avfs_cks_off_voltage; + uint32_t frame_time_x2; + uint16_t mem_latency_high; + uint16_t mem_latency_low; +}; + +/* To convert to Q8.8 format for firmware */ +#define SMU7_Q88_FORMAT_CONVERSION_UNIT 256 + +enum SMU7_I2CLineID { + SMU7_I2CLineID_DDC1 = 0x90, + SMU7_I2CLineID_DDC2 = 0x91, + SMU7_I2CLineID_DDC3 = 0x92, + SMU7_I2CLineID_DDC4 = 0x93, + SMU7_I2CLineID_DDC5 = 0x94, + SMU7_I2CLineID_DDC6 = 0x95, + SMU7_I2CLineID_SCLSDA = 0x96, + SMU7_I2CLineID_DDCVGA = 0x97 +}; + +#define SMU7_I2C_DDC1DATA 0 +#define SMU7_I2C_DDC1CLK 1 +#define SMU7_I2C_DDC2DATA 2 +#define SMU7_I2C_DDC2CLK 3 +#define SMU7_I2C_DDC3DATA 4 +#define SMU7_I2C_DDC3CLK 5 +#define SMU7_I2C_SDA 40 +#define SMU7_I2C_SCL 41 +#define SMU7_I2C_DDC4DATA 65 +#define SMU7_I2C_DDC4CLK 66 +#define SMU7_I2C_DDC5DATA 0x48 +#define SMU7_I2C_DDC5CLK 0x49 +#define SMU7_I2C_DDC6DATA 0x4a +#define SMU7_I2C_DDC6CLK 0x4b +#define SMU7_I2C_DDCVGADATA 0x4c +#define SMU7_I2C_DDCVGACLK 0x4d + +#define SMU7_UNUSED_GPIO_PIN 0x7F +uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); +uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr); +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c new file mode 100644 index 000000000000..260fce050175 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -0,0 +1,729 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "hwmgr.h" +#include "smumgr.h" +#include "smu7_hwmgr.h" +#include "smu7_powertune.h" +#include "pp_debug.h" +#include "smu7_common.h" + +#define VOLTAGE_SCALE 4 + +static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; + +static struct gpu_pt_config_reg GCCACConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +static struct gpu_pt_config_reg GCCACConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, 
GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, GPU_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +static struct gpu_pt_config_reg DIDTConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND 
}, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 
0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 
0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, 
DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } +}; + +static struct gpu_pt_config_reg DIDTConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { 
ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, 
GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, 
DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, 
DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + + +static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) +{ + + uint32_t en = enable ? 
1 : 0; + int32_t result = 0; + uint32_t data; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + DIDTBlock_Info &= ~SQ_Enable_MASK; + DIDTBlock_Info |= en << SQ_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + DIDTBlock_Info &= ~DB_Enable_MASK; + DIDTBlock_Info |= en << DB_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + DIDTBlock_Info &= ~TD_Enable_MASK; + DIDTBlock_Info |= en << TD_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + DIDTBlock_Info &= ~TCP_Enable_MASK; + DIDTBlock_Info |= en << TCP_Enable_SHIFT; + } + + if (enable) + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info); + + return result; +} + +static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr, + struct gpu_pt_config_reg *cac_config_regs) +{ + struct gpu_pt_config_reg *config_regs = cac_config_regs; + uint32_t cache = 0; + uint32_t data = 0; + + PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL); + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == GPU_CONFIGREG_CACHE) + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + else { + switch (config_regs->type) { + case GPU_CONFIGREG_SMC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset); + break; + + case GPU_CONFIGREG_DIDT_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); + break; + + case GPU_CONFIGREG_GC_CAC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); + break; + + default: + data = cgs_read_register(hwmgr->device, config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + + switch (config_regs->type) { + case GPU_CONFIGREG_SMC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data); + break; + + case GPU_CONFIGREG_DIDT_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data); + break; + + 
case GPU_CONFIGREG_GC_CAC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); + break; + + default: + cgs_write_register(hwmgr->device, config_regs->offset, data); + break; + } + cache = 0; + } + + config_regs++; + } + + return 0; +} + +int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0; + uint32_t count, value, value2; + struct cgs_system_info sys_info = {0}; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + + + if (result == 0) + num_se = sys_info.value; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + + /* TO DO Pre DIDT disable clock gating */ + value = 0; + value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK + | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK + | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value); + + if (hwmgr->chip_id == CHIP_POLARIS10) { + result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } else if (hwmgr->chip_id == CHIP_POLARIS11) { + result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } + } + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); + + result = smu7_enable_didt(hwmgr, true); + PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); + + /* TO DO Post DIDT enable clock gating */ + } + + return 0; +} + +int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + /* TO DO Pre DIDT disable clock gating */ + + result = smu7_enable_didt(hwmgr, false); + PP_ASSERT_WITH_CODE((result == 0), "Failed to disable DIDT.", return result); + /* TO DO Post DIDT enable clock gating */ + } + + return 0; +} + +int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (0 == smc_result) ? 
true : false; + } + return result; +} + +int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + +int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int smc_result; + int result = 0; + struct phm_cac_tdp_table *cac_table; + + data->power_containment_features = 0; + if (hwmgr->pp_table_version == PP_TABLE_V1) + cac_table = table_info->cac_dtp_table; + else + cac_table = hwmgr->dyn_state.cac_dtp_table; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (0 == smc_result) { + uint32_t default_limit = + (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (smu7_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = 
smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + +int smu7_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_table; + + int adjust_percent, target_tdp; + int result = 0; + + if (hwmgr->pp_table_version == PP_TABLE_V1) + cac_table = table_info->cac_dtp_table; + else + cac_table = hwmgr->dyn_state.cac_dtp_table; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h new file mode 100644 index 000000000000..22f86b6bf1be --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h @@ -0,0 +1,62 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _SMU7_POWERTUNE_H +#define _SMU7_POWERTUNE_H + +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +#define ixGC_CAC_CNTL 0x0000 +#define ixDIDT_SQ_STALL_CTRL 0x0004 +#define ixDIDT_SQ_TUNING_CTRL 0x0005 +#define ixDIDT_TD_STALL_CTRL 0x0044 +#define ixDIDT_TD_TUNING_CTRL 0x0045 +#define ixDIDT_TCP_STALL_CTRL 0x0064 +#define ixDIDT_TCP_TUNING_CTRL 0x0065 + + +int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr); +int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr); +int smu7_enable_power_containment(struct pp_hwmgr *hwmgr); +int smu7_disable_power_containment(struct pp_hwmgr *hwmgr); +int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int smu7_power_control_set_level(struct pp_hwmgr *hwmgr); +int smu7_enable_didt_config(struct pp_hwmgr *hwmgr); +int smu7_disable_didt_config(struct pp_hwmgr *hwmgr); +#endif /* DGPU_POWERTUNE_H */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c new file mode 100644 index 000000000000..fb6c6f6106d5 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -0,0 +1,577 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+#include <asm/div64.h>
+#include "smu7_thermal.h"
+#include "smu7_hwmgr.h"
+#include "smu7_common.h"
+
+int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+		struct phm_fan_speed_info *fan_speed_info)
+{
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	fan_speed_info->supports_percent_read = true;
+	fan_speed_info->supports_percent_write = true;
+	fan_speed_info->min_percent = 0;
+	fan_speed_info->max_percent = 100;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+			hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+		fan_speed_info->supports_rpm_read = true;
+		fan_speed_info->supports_rpm_write = true;
+		fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+		fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+	} else {
+		fan_speed_info->min_rpm = 0;
+		fan_speed_info->max_rpm = 0;
+	}
+
+	return 0;
+}
+
+int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t *speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+
+	tmp64 = (uint64_t)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (uint32_t)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+	uint32_t tach_period;
+	uint32_t crystal_clock_freq;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+			(hwmgr->thermal_controller.fanInfo.
+			ucTachometerPulsesPerRevolution == 0))
+		return 0;
+
+	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_TACH_STATUS, TACH_PERIOD);
+
+	if (tach_period == 0)
+		return -EINVAL;
+
+	crystal_clock_freq = smu7_get_xclk(hwmgr);
+
+	*speed = 60 * crystal_clock_freq * 10000 / tach_period;
+
+	return 0;
+}
+
+/**
+* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
+* @param hwmgr the address of the powerplay hardware manager.
+* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
+* @exception Should always succeed.
+*/
+int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+
+	if (hwmgr->fan_ctrl_is_in_default_mode) {
+		hwmgr->fan_ctrl_default_mode =
+				PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+						CG_FDO_CTRL2, FDO_PWM_MODE);
+		hwmgr->tmin =
+				PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+						CG_FDO_CTRL2, TMIN);
+		hwmgr->fan_ctrl_is_in_default_mode = false;
+	}
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, TMIN, 0);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, FDO_PWM_MODE, mode);
+
+	return 0;
+}
+
+/**
+* Reset Fan Speed Control to default mode.
+* @param hwmgr the address of the powerplay hardware manager.
+* @exception Should always succeed.
+*/
+int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr->fan_ctrl_is_in_default_mode) {
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, TMIN, hwmgr->tmin);
+		hwmgr->fan_ctrl_is_in_default_mode = true;
+	}
+
+	return 0;
+}
+
+static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
+		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_FanSpeedInTableIsRPM))
+			hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
+					hwmgr->thermal_controller.
+					advanceFanControlParameters.usMaxFanRPM);
+		else
+			hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
+					hwmgr->thermal_controller.
+					advanceFanControlParameters.usMaxFanPWM);
+
+	} else {
+		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
+		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+	}
+
+	if (!result && hwmgr->thermal_controller.
+			advanceFanControlParameters.ucTargetTemperature)
+		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanTemperatureTarget,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ucTargetTemperature);
+
+	return result;
+}
+
+
+int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+}
+
+/**
+* Set Fan Speed in percent.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param speed is the percentage value (0% - 100%) to be set.
+* @exception Fails is the 100% setting appears to be 0.
+*/
+int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	if (speed > 100)
+		speed = 100;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl))
+		smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (uint64_t)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (uint32_t)tmp64;
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
+
+	return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reset Fan Speed to default.
+* @param hwmgr the address of the powerplay hardware manager.
+* @exception Always succeeds.
+*/
+int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl)) {
+		result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+		if (!result)
+			result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+	} else
+		result = smu7_fan_ctrl_set_default_mode(hwmgr);
+
+	return result;
+}
+
+/**
+* Set Fan Speed in RPM.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param speed is the percentage value (min - max) to be set.
+* @exception Fails is the speed not lie between min and max. +*/ +int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t tach_period; + uint32_t crystal_clock_freq; + + if (hwmgr->thermal_controller.fanInfo.bNoFan || + (hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution == 0) || + (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || + (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); + + crystal_clock_freq = smu7_get_xclk(hwmgr); + + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_STATUS, TACH_PERIOD, tach_period); + + return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reads the remote temperature from the SIslands thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + + temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_STATUS, CTF_TEMP); + + /* Bit 9 means the reading is lower than the lowest usable value. */ + if (temp & 0x200) + temp = SMU7_THERMAL_MAXIMUM_TEMP_READING; + else + temp = temp & 0x1ff; + + temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. +* @param range Temperature range to be programmed for high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + uint32_t low_temp, uint32_t high_temp) +{ + uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + if (low < low_temp) + low = low_temp; + if (high > high_temp) + high = high_temp; + + if (low > high) + return -EINVAL; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, DIG_THERM_INTH, + (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, DIG_THERM_INTL, + (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_CTRL, DIG_THERM_DPM, + (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. +*/ +static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution - 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. 
+*/ +int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK); + alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to enable internal thermal interrupts */ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK); + alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to disable internal thermal interrupts */ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = smu7_thermal_disable_alert(hwmgr); + + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + smu7_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled + * PHM_PlatformCaps_MicrocodeFanControl even after + * this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + smu7_fan_ctrl_start_smc_fan_control(hwmgr); + smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return smu7_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return smu7_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return smu7_thermal_disable_alert(hwmgr); +} + +static const struct phm_master_table_item +phm_thermal_start_thermal_controller_master_list[] = { + {NULL, tf_smu7_thermal_initialize}, + {NULL, tf_smu7_thermal_set_temperature_range}, + {NULL, tf_smu7_thermal_enable_alert}, + {NULL, smum_thermal_avfs_enable}, +/* We should restrict performance levels to low before we halt the SMC. + * On the other hand we are still in boot state when we do this + * so it would be pointless. + * If this assumption changes we have to revisit this table. + */ + {NULL, smum_thermal_setup_fan_table}, + {NULL, tf_smu7_thermal_start_smc_fan_control}, + {NULL, NULL} +}; + +static const struct phm_master_table_header +phm_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + phm_thermal_start_thermal_controller_master_list +}; + +static const struct phm_master_table_item +phm_thermal_set_temperature_range_master_list[] = { + {NULL, tf_smu7_thermal_disable_alert}, + {NULL, tf_smu7_thermal_set_temperature_range}, + {NULL, tf_smu7_thermal_enable_alert}, + {NULL, NULL} +}; + +static const struct phm_master_table_header +phm_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + phm_thermal_set_temperature_range_master_list +}; + +int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + smu7_fan_ctrl_set_default_mode(hwmgr); + return 0; +} + +/** +* Initializes the thermal controller related functions in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. 
+*/ +int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, + &phm_thermal_set_temperature_range_master, + &(hwmgr->set_temperature_range)); + + if (!result) { + result = phm_construct_table(hwmgr, + &phm_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (result) + phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); + } + + if (!result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h new file mode 100644 index 000000000000..6face973be43 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h @@ -0,0 +1,58 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _SMU7_THERMAL_H_ +#define _SMU7_THERMAL_H_ + +#include "hwmgr.h" + +#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1 +#define SMU7_THERMAL_LOW_ALERT_MASK 0x2 + +#define SMU7_THERMAL_MINIMUM_TEMP_READING -256 +#define SMU7_THERMAL_MAXIMUM_TEMP_READING 255 + +#define SMU7_THERMAL_MINIMUM_ALERT_TEMP 0 +#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); +extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); +extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); +extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); +extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h new file mode 100644 index 000000000000..65eb630bfea3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h @@ -0,0 +1,58 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _PP_COMMON_H +#define _PP_COMMON_H + +#include "smu7_ppsmc.h" +#include "cgs_common.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + + +#include "smu74.h" +#include "smu74_discrete.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h new file mode 100644 index 000000000000..bce00096d80d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -0,0 +1,412 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef DGPU_VI_PP_SMC_H +#define DGPU_VI_PP_SMC_H + + +#pragma pack(push, 1) + +#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + + +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + + +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + + +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) + +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define 
PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) + +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) + +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) + +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) + +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) + +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) +#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) +#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +#define PPSMC_MSG_Test ((uint16_t) 0x100) +#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) +#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) +#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) +#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) +#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) +#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) +#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) +#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) +#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) +#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) +#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) +#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) +#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) +#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) +#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) +#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) +#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) +#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) +#define 
PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) +#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) +#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) +#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) +#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) +#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125) +#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126) +#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127) +#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) + +#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) +#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) +#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) +#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) +#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) +#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) +#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) + +#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) +#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152) +#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c) +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define 
PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) +#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) +#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) +#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) +#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) +#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c) +#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d) +#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e) +#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f) +#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) +#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) +#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) +#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) +#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) +#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) +#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) +#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) +#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) +#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) +#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) +#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) +#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) +#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) +#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) +#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) +#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) +#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) +#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) +#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) +#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) +#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) +#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) +#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) +#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define 
PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) +#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) +#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) +#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) +#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) +#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) +#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) + +#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) +#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) +#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) +#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) +#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) +#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) +#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C) +#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) +#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) +#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) +#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) +#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) +#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) +#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) +#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) +#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) +#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) +#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) +#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A) +#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B) + +#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C) +#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275) +#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277) +#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400) +#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401) +#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402) +#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) +#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) + +#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) +#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) +#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) + +#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300) +#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301) + +#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306) + +#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600) +#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601) +#define PPSMC_MSG_SetAddress ((uint16_t) 0x800) +#define PPSMC_MSG_GetData ((uint16_t) 0x801) +#define PPSMC_MSG_SetData ((uint16_t) 0x802) + +typedef uint16_t PPSMC_Msg; + +#define 
PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 + +#pragma pack(pop) + +#endif + From 34a564eaf5289ad72798a07dc475b85fbffc68f2 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 9 Sep 2016 13:29:47 +0800 Subject: [PATCH 09/49] drm/amd/powerplay: implement fw image related smum interface for Polaris. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- .../drm/amd/powerplay/smumgr/polaris10_smc.c | 2284 +++++++++++++++++ .../drm/amd/powerplay/smumgr/polaris10_smc.h | 42 + .../amd/powerplay/smumgr/polaris10_smumgr.c | 24 +- .../amd/powerplay/smumgr/polaris10_smumgr.h | 23 +- 5 files changed, 2367 insertions(+), 8 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 19e79469f6bc..872a2f030989 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -3,7 +3,7 @@ # It provides the smu management services for the driver. SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ - polaris10_smumgr.o iceland_smumgr.o + polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c new file mode 100644 index 000000000000..8ed98b708c55 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -0,0 +1,2284 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "polaris10_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "polaris10_smumgr.h" +#include "pppcielanes.h" + +#include "smu_ucode_xfer_vi.h" +#include "smu74_discrete.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "gca/gfx_8_0_d.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "polaris10_pwrvirus.h" +#include "smu7_ppsmc.h" + +#define POLARIS10_SMC_SIZE 0x20000 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VDDC_VDDCI_DELTA 200 +#define MC_CG_ARB_FREQ_F1 0x0b + +static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ + { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, +}; + +static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = { + {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160}, + {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108}, + {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} }; + +static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + *voltage = *mvdd = 0; + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if (dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << 
VDDCI_SHIFT; + else if (dep_table->entries[i-1].vddci) { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} + +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table = + &hwmgr->thermal_controller.advanceFanControlParameters; + int i, j, k; + const uint16_t *pdef1; + const uint16_t *pdef2; + + table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + table->FanGainEdge = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainEdge)); + table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + + pdef1 = defaults->BAPMTI_R; + pdef2 = defaults->BAPMTI_RC; + + for (i = 0; i < SMU74_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU74_DTE_SOURCES; j++) { + for (k = 0; k < SMU74_DTE_SINKS; k++) { + table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); + table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + smu_data->power_tune_table.TDC_MAWt = 
defaults->TDC_MAWt; + + return 0; +} + +static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (polaris10_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else { + smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; + smu_data->power_tune_table.LPMLTemperatureMin = + (uint8_t)((temp >> 16) & 0xff); + smu_data->power_tune_table.LPMLTemperatureMax = + (uint8_t)((temp >> 8) & 0xff); + smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); + } + return 0; +} + +static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + +/* TO DO move to hwmgr */ + if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) + || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US( + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + if (polaris10_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + + if (polaris10_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + + if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + if (0 != polaris10_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + if (polaris10_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + if (polaris10_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. 
+ * @return 0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* tables are already swapped, so in order to use the value from it,
+ * we need to swap it back.
+ * We are populating vddc CAC data to BapmVddc table
+ * in split and merged mode
+ */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ + +static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + polaris10_populate_smc_vddci_table(hwmgr, table); + polaris10_populate_smc_mvdd_table(hwmgr, table); + polaris10_populate_cac_table(hwmgr, table); + + return 0; +} + +static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_Ulv *state) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + return polaris10_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. 
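The loop below therefore iterates pcie_speed_table.count + 1 times, leaving that last LinkLevel entry for the boot level.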
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + +/* To Do move to hwmgr */ + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + + +static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + uint32_t i, ref_clk; + + struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; + + ref_clk = smu7_get_xclk(hwmgr); + + if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) { + for (i = 0; i < NUM_SCLK_RANGE; i++) { + table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting; + table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv; + table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } + return; + } + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv; + smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv; + + table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; + table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; + table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } +} + +/** +* Calculates the SCLK dividers using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ +static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, SMU_SclkSetting *sclk_setting) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_clock_dividers_ai dividers; + uint32_t ref_clock; + uint32_t pcc_target_percent, pcc_target_freq, 
ss_target_percent, ss_target_freq; + uint8_t i; + int result; + uint64_t temp; + + sclk_setting->SclkFrequency = clock; + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, ÷rs); + if (result == 0) { + sclk_setting->Fcw_int = dividers.usSclk_fcw_int; + sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac; + sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int; + sclk_setting->PllRange = dividers.ucSclkPllRange; + sclk_setting->Sclk_slew_rate = 0x400; + sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac; + sclk_setting->Pcc_down_slew_rate = 0xffff; + sclk_setting->SSc_En = dividers.ucSscEnable; + sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int; + sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac; + sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac; + return result; + } + + ref_clock = smu7_get_xclk(hwmgr); + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + if (clock > smu_data->range_table[i].trans_lower_frequency + && clock <= smu_data->range_table[i].trans_upper_frequency) { + sclk_setting->PllRange = i; + break; + } + } + + sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); + temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw_frac = temp & 0xffff; + + pcc_target_percent = 10; /* Hardcode 10% for now. */ + pcc_target_freq = clock - (clock * pcc_target_percent / 100); + sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); + + ss_target_percent = 2; /* Hardcode 2% for now. */ + sclk_setting->SSc_En = 0; + if (ss_target_percent) { + sclk_setting->SSc_En = 1; + ss_target_freq = clock - (clock * ss_target_percent / 100); + sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); + temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw1_frac = temp & 0xffff; + } + + return 0; +} + +/** +* Populates single SMC SCLK structure using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ + +static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU74_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMU_SclkSetting curr_sclk_setting = { 0 }; + + result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); + + /* populate graphics levels */ + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + &level->MinVoltage, &mvdd); + + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + level->ActivityLevel = sclk_al_threshold; + + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->UpHyst = 10; + level->DownHyst = 0; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + 
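/* remember the minimum display clock needed for self refresh; used just below to pick the deep sleep divider when SclkDeepSleep is enabled */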
data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, + hwmgr->display_config.min_core_set_clock_in_sr); + + /* Default to slow, highest DPM level will be + * set to PPSMC_DISPLAY_WATERMARK_LOW later. + */ + if (data->update_up_hyst) + level->UpHyst = (uint8_t)data->up_hyst; + if (data->update_down_hyst) + level->DownHyst = (uint8_t)data->down_hyst; + + level->SclkSetting = curr_sclk_setting; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate); + return 0; +} + +/** +* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states +* +* @param hwmgr the address of the hardware manager +*/ +int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * + SMU74_MAX_LEVELS_GRAPHICS; + struct SMU74_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t hightest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table)); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + + result = polaris10_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. 
*/ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SPLLShutdownSupport)) + smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0; + + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? i : max_entry); + } else { + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (hightest_pcie_level_enabled + 1))) != 0)) + hightest_pcie_level_enabled++; + + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0)) + lowest_pcie_level_enabled++; + + while ((count < hightest_pcie_level_enabled) && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < + hightest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : + hightest_pcie_level_enabled; + + /* set pcieDpmLevel to hightest_pcie_level_enabled */ + for (i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = hightest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change */ + result = polaris10_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + + +static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + struct cgs_display_info info = {0, 0, NULL}; + uint32_t mclk_stutter_mode_threshold = 40000; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (table_info->vdd_dep_on_mclk) { + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + &mem_level->MinVoltage, &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory " + "VDDC voltage dependency table", return result); + } + + mem_level->MclkFrequency = clock; + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->UpHyst = 0; + mem_level->DownHyst = 100; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + mem_level->StutterEnable = false; + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + data->display_timing.num_existing_displays = info.display_count; + + if (mclk_stutter_mode_threshold && + (clock <= mclk_stutter_mode_threshold) && + (SMUM_READ_FIELD(hwmgr->device, 
DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + return result; +} + +/** +* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states +* +* @param hwmgr the address of the hardware manager +*/ +int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) * + SMU74_MAX_LEVELS_MEMORY; + struct SMU74_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = polaris10_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + if (i == dpm_table->mclk_table.count - 1) { + levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + levels[i].EnabledForActivity = 1; + } + if (result) + return result; + } + + /* In order to prevent MC activity from stutter mode to push DPM up, + * the UVD change complements this by putting the MCLK in + * a higher state by default such that we are not affected by + * up threshold or and MCLK DPM latency. + */ + levels[0].ActivityLevel = 0x1f; + CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + + /* level count will send to smc once at init smc table and never change */ + result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + +/** +* Populates the SMC MVDD structure using the provided memory clock. +* +* @param hwmgr the address of the hardware manager +* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
+* @param voltage the SMC VOLTAGE structure to be populated +*/ +static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} + +static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + int result = 0; + uint32_t sclk_frequency; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + sclk_frequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table", + ); + + result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); + PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); + + table->ACPILevel.DeepSleepDivId = 0; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); + + + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table", + ); + + us_mvdd = 0; + if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + 
(data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!polaris10_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level)) + table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); + else + table->MemoryACPILevel.MinMvdd = 0; + + table->MemoryACPILevel.StutterEnable = false; + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} + +static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + + table->VceLevel[count].MinVoltage |= + (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /*retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + + +static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t)(mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].MinVoltage = 0; + table->SamuLevel[count].Frequency = 
mm_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burst_time; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = (uint8_t)burst_time; + + return 0; +} + +static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct SMU74_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + int result = 0; + + for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) { + result = polaris10_populate_memory_timing_parameters(hwmgr, + hw_data->dpm_table.sclk_table.dpm_levels[i].value, + hw_data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result == 0) + result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j); + if (result != 0) + return result; + } + } + + result = polaris10_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU74_Discrete_MCArbDramTimingTable), + SMC_RAM_END); + return result; +} + +static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + 
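/* one SAMU level is built per multimedia clock/voltage dependency entry; the boot level stays at the lowest entry */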
table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + } + + return result; +} + +static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_sclk->entries[level].clk >= + hw_data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); + for 
(level = 0; level < count; level++) { + if (table_info->vdd_dep_on_mclk->entries[level].clk >= + hw_data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + + +static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + + uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF. + */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (67 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + + if (hwmgr->chip_id == CHIP_POLARIS10) { + min = 1000; + max = 2300; + } else { + min = 1100; + max = 2100; + } + + ro = efuse * (max - min) / 255 + min; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + if (hwmgr->chip_id == CHIP_POLARIS10) { + volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \ + (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); + volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ + (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); + } else { + volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \ + (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); + volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ + (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); + } + + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); + + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +/** +* Populates the SMC VRConfig field in DPM table. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!", + ); + } + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->soft_regs_start + + offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + + +static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + + SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + int result = 0; + struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; + AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; + AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; + uint32_t tmp, i; + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return result; + + result = atomctrl_get_avfs_information(hwmgr, &avfs_params); + + if (0 == result) { + table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); + table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); + table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); + table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); + table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); + table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); + table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); + table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); + table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); + table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; + table->AVFSGB_VDROOP_TABLE[1].m1 = 
PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); + table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); + table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; + table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); + AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); + AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); + AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); + AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); + AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); + AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); + AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); + + for (i = 0; i < NUM_VFT_COLUMNS; i++) { + AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); + AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); + } + + result = polaris10_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), + &tmp, SMC_RAM_END); + + polaris10_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_meanNsigma, + sizeof(AVFS_meanNsigma_t), + SMC_RAM_END); + + result = polaris10_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), + &tmp, SMC_RAM_END); + polaris10_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_SclkOffset, + sizeof(AVFS_Sclk_Offset_t), + SMC_RAM_END); + + data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); + data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; + } + return result; +} + + +/** +* Initialize the ARB DRAM timing table's index field. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + uint32_t tmp; + int result; + + /* This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. 
+ */ + result = polaris10_read_smc_sram_dword(smumgr, + smu_data->arb_table_start, &tmp, SMC_RAM_END); + + if (result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return polaris10_write_smc_sram_dword(smumgr, + smu_data->arb_table_start, tmp, SMC_RAM_END); +} + +static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &polaris10_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0]; + +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + pp_atomctrl_clock_dividers_vi dividers; + + polaris10_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control) + polaris10_populate_smc_voltage_tables(hwmgr, table); + + table->SystemFlags = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (hw_data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = polaris10_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT); + } + + result = polaris10_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = polaris10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = polaris10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result); + + result = polaris10_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = polaris10_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = polaris10_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states are just 
copies of the boot state) we only + * need to populate the ARB settings for the initial state. + */ + result = polaris10_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = polaris10_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + result = polaris10_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = polaris10_populate_smc_initailial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot State!", return result); + + result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = polaris10_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + result = polaris10_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); + + table->CurrSclkPllRange = 0xff; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + table->VRConfig = 0; + + result = polaris10_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, + &gpio_pin)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* For porlarity read GPIOPAD_A with assigned Gpio pin + * since VBIOS will program this register to set 'inactive state', + * driver can then determine 'active state' from this and + * program SMU with correct polarity + */ + table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) + & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 
1:0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot) + && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + /* Populate BIF_SCLK levels into SMC DPM table */ + for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) { + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], ÷rs); + PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result); + + if (i == 0) + table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); + else + table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); + } + + for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController), + SMC_RAM_END); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + result = polaris10_init_arb_table_index(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload arb data to SMC memory!", return result); + + result = polaris10_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + return 0; +} + +static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return polaris10_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) +{ + int ret; + struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return 0; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); + + ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? + 0 : -1; + + if (!ret) + /* If this param is not changed, this function could fire unnecessarily */ + smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY; + + return ret; +} + +/** +* Set up the fan table to control the fan using the SMC. 
+* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (smu_data->fan_table_start == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. + usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr-> + thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> + thermal_controller.advanceFanControlParameters.ulCycleDelay * + reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, smu_data->fan_table_start, + (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), + SMC_RAM_END); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanMinPwm, + hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit); + + if (!res && hwmgr->thermal_controller. 
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanSclkTarget, + hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + + if (res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + return 0; +} + +static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, + UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + else + smu_data->smc_state_table.VceBootLevel = 0; + + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + + smu_data->smc_state_table.SamuBootLevel = 0; + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 
0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + + +static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + int max_entry, i; + + max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ? + SMU74_MAX_LEVELS_LINK : + pcie_table->count; + /* Setup BIF_SCLK levels */ + for (i = 0; i < max_entry; i++) + smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; + return 0; +} + +int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + polaris10_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + polaris10_update_vce_smc_table(hwmgr); + break; + case SMU_SAMU_TABLE: + polaris10_update_samu_smc_table(hwmgr); + break; + case SMU_BIF_TABLE: + polaris10_update_bif_smc_table(hwmgr); + default: + break; + } + return 0; +} + +int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = polaris10_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + PP_ASSERT_WITH_CODE((result == 0), + "Failed to update SCLK threshold!", return result); + + result = polaris10_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU74_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU74_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU74_SoftRegisters, VBlankTimeout); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU74_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); + case 
LowSclkInterruptThreshold: + return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("can't get the offset of type %x member %x \n", type, member); + return 0; +} + +uint32_t polaris10_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU74_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU74_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU74_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU74_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU74_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU74_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU74_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU74_MAX_LEVELS_MVDD; + case SMU_UVD_MCLK_HANDSHAKE_DISABLE: + return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; + } + + printk("can't get the mac of %x \n", value); + return 0; +} + +/** +* Get the location of various tables inside the FW image. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->dpm_table_start = tmp; + + error |= (0 != result); + + result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->soft_regs_start = tmp; + + error |= (0 != result); + + result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->mc_reg_table_start = tmp; + + result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->fan_table_start = tmp; + + error |= (0 != result); + + result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->arb_table_start = tmp; + + error |= (0 != result); + + result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} \ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h new file mode 100644 index 000000000000..5ade3cea8bb7 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h @@ -0,0 +1,42 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef POLARIS10_SMC_H +#define POLARIS10_SMC_H + +#include "smumgr.h" + + +int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int polaris10_init_smc_table(struct pp_hwmgr *hwmgr); +int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr); +int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member); +uint32_t polaris10_get_mac_definition(uint32_t value); +int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr); +bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 8047ad221e74..b13ea4218415 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -38,6 +38,8 @@ #include "ppatomctrl.h" #include "pp_debug.h" #include "cgs_common.h" +#include "polaris10_smc.h" +#include "smu7_ppsmc.h" #define POLARIS10_SMC_SIZE 0x20000 @@ -46,7 +48,7 @@ #define MAX_STRING_SIZE 15 #define BUFFER_SIZETWO 131072 /* 128 *1024 */ -#define SMC_RAM_END 0x40000 +#define PPPOLARIS10_TARGETACTIVITY_DFLT 50 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ @@ -61,8 +63,8 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } }; -static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = - {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; +static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { + 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; /** * Set the address for reading/writing the SMC SRAM space. 
@@ -921,6 +923,8 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) struct polaris10_smumgr *smu_data; uint8_t *internal_buf; uint64_t mc_addr = 0; + int i; + /* Allocate memory for backend private data */ smu_data = (struct polaris10_smumgr *)(smumgr->backend); smu_data->header_buffer.data_size = @@ -974,6 +978,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) else smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; + for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) + smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT; + return 0; } @@ -988,6 +995,17 @@ static const struct pp_smumgr_func polaris10_smu_funcs = { .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .update_smc_table = polaris10_update_smc_table, + .get_offsetof = polaris10_get_offsetof, + .process_firmware_header = polaris10_process_firmware_header, + .init_smc_table = polaris10_init_smc_table, + .update_sclk_threshold = polaris10_update_sclk_threshold, + .thermal_avfs_enable = polaris10_thermal_avfs_enable, + .thermal_setup_fan_table = polaris10_thermal_setup_fan_table, + .populate_all_graphic_levels = polaris10_populate_all_graphic_levels, + .populate_all_memory_levels = polaris10_populate_all_memory_levels, + .get_mac_definition = polaris10_get_mac_definition, + .is_dpm_running = polaris10_is_dpm_running, }; int polaris10_smum_init(struct pp_smumgr *smumgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h index 7c2445f1f043..1ab9b1d9df9a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h @@ -24,9 +24,13 @@ #ifndef _POLARIS10_SMUMANAGER_H #define _POLARIS10_SMUMANAGER_H -#include + #include #include "smu74.h" +#include "smu74_discrete.h" + + +#define SMC_RAM_END 0x40000 struct polaris10_avfs { enum AVFS_BTC_STATUS avfs_btc_status; @@ -65,7 +69,13 @@ struct polaris10_smumgr { uint8_t *mec_image; struct polaris10_buffer_entry smu_buffer; struct polaris10_buffer_entry header_buffer; - uint32_t soft_regs_start; + + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + uint8_t *read_rrm_straps; uint32_t read_drm_straps_mc_address_high; uint32_t read_drm_straps_mc_address_low; @@ -74,15 +84,20 @@ struct polaris10_smumgr { uint8_t protected_mode; uint8_t security_hard_key; struct polaris10_avfs avfs; + SMU74_Discrete_DpmTable smc_state_table; + struct SMU74_Discrete_Ulv ulv_setting; + struct SMU74_Discrete_PmFuses power_tune_table; + struct polaris10_range_table range_table[NUM_SCLK_RANGE]; + const struct polaris10_pt_defaults *power_tune_defaults; + uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; + uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; }; int polaris10_smum_init(struct pp_smumgr *smumgr); - int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit); int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit); int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit); #endif - From 4be051aeb3964146d3922238fff0ed1e4a9656d1 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 6 Sep 2016 16:41:44 +0800 Subject: [PATCH 10/49] drm/amd/powerplay: use smu7 hwmgr to 
manager polaris10/11 Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 2 -- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 38 ++++++++++++++++++-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index d5d5626b5195..95a3cf10b111 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -9,8 +9,6 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ fiji_clockpowergating.o fiji_thermal.o \ - polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ - polaris10_clockpowergating.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ smu7_clockpowergating.o iceland_hwmgr.o \ iceland_clockpowergating.o iceland_thermal.o \ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 4f82a06ba3e2..78ccbecd7591 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -38,9 +38,9 @@ extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); +static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); @@ -89,7 +89,9 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case CHIP_POLARIS11: case CHIP_POLARIS10: - polaris10_hwmgr_init(hwmgr); + smu7_hwmgr_init(hwmgr); + polaris_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; default: return -EINVAL; @@ -206,6 +208,8 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, } + + /** * Returns once the part of the register indicated by the mask has * reached the given value.The indirect space is described by giving @@ -710,3 +714,33 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, return ret; } +int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + /* power tune caps Assume disabled */ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + if (hwmgr->chip_id == CHIP_POLARIS11) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SPLLShutdownSupport); + return 0; +} + diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index e98748344801..c9628b4db2c3 100644 --- 
a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -709,6 +709,7 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); +extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr); extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t id, uint16_t *voltage); From 18edef19ea44f4379e635bd32b553e58e23bba95 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 9 Sep 2016 13:29:37 +0800 Subject: [PATCH 11/49] drm/amd/powerplay: implement fw image related smu interface for Fiji. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 2371 +++++++++++++++++ .../gpu/drm/amd/powerplay/smumgr/fiji_smc.h | 51 + .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 37 +- .../drm/amd/powerplay/smumgr/fiji_smumgr.h | 19 +- 5 files changed, 2472 insertions(+), 8 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 872a2f030989..7561239eb874 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -2,7 +2,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o\ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c new file mode 100644 index 000000000000..fd0c00173cce --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -0,0 +1,2371 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "fiji_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "fiji_smumgr.h" +#include "pppcielanes.h" +#include "smu7_ppsmc.h" +#include "smu73.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define VDDC_VDDCI_DELTA 300 +#define MC_CG_ARB_FREQ_F1 0x0b + +/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs + * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] + */ +static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = { + {600, 1050, 3, 0}, {600, 1050, 6, 1} }; + +/* [FF, SS] type, [] 4 voltage ranges, and + * [Floor Freq, Boundary Freq, VID min , VID max] + */ +static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = { + { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, + { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; + +/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] + * (coming from PWR_CKS_CNTL.stretch_amount reg spec) + */ +static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = { + {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; + +static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ + {1, 0xF, 0xFD, + /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ + 0x19, 5, 45} +}; + +/* PPGen has the gain setting generated in x * 100 unit + * This function is to convert the unit to x * 4096(0x1000) unit. 
+ * This is the unit expected by SMC firmware + */ +static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, + uint32_t clock, uint32_t *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + *voltage = *mvdd = 0; + + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if (dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i-1].vddci) { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} + + +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) +{ + switch (line) { + case SMU7_I2CLineID_DDC1: + *scl = SMU7_I2C_DDC1CLK; + *sda = SMU7_I2C_DDC1DATA; + break; + case SMU7_I2CLineID_DDC2: + *scl = SMU7_I2C_DDC2CLK; + *sda = SMU7_I2C_DDC2DATA; + break; + case SMU7_I2CLineID_DDC3: + *scl = SMU7_I2C_DDC3CLK; + *sda = SMU7_I2C_DDC3DATA; + break; + case SMU7_I2CLineID_DDC4: + *scl = SMU7_I2C_DDC4CLK; + *sda = SMU7_I2C_DDC4DATA; + break; + case SMU7_I2CLineID_DDC5: + *scl = SMU7_I2C_DDC5CLK; + *sda = SMU7_I2C_DDC5DATA; + break; + case SMU7_I2CLineID_DDC6: + *scl = SMU7_I2C_DDC6CLK; + *sda = SMU7_I2C_DDC6DATA; + break; + case SMU7_I2CLineID_SCLSDA: + *scl = SMU7_I2C_SCL; + *sda = SMU7_I2C_SDA; + break; + case SMU7_I2CLineID_DDCVGA: + *scl = SMU7_I2C_DDCVGACLK; + *sda = SMU7_I2C_DDCVGADATA; + break; + default: + *scl = 0; + *sda = 0; + break; + } +} + +static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= 
POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &fiji_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0]; + +} + +static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + + SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table = + &hwmgr->thermal_controller.advanceFanControlParameters; + uint8_t uc_scl, uc_sda; + + /* TDP number of fraction bits are changed from 8 to 7 for Fiji + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 128)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; + + /* The following are for new Fiji Multi-input fan/thermal control */ + dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitLiquid1 * 256); + dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitLiquid2 * 256); + dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitVrVddc * 256); + dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitVrMvdd * 256); + dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitPlx * 256); + + dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainEdge)); + dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainLiquid)); + dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainVrVddc)); + dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); + dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainPlx)); + dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHbm)); + + dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; + dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; + dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; + dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; + + get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Liquid_I2C_LineSCL = uc_scl; + dpm_table->Liquid_I2C_LineSDA = uc_sda; + + get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Vr_I2C_LineSCL = uc_scl; + 
dpm_table->Vr_I2C_LineSDA = uc_sda; + + get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Plx_I2C_LineSCL = uc_scl; + dpm_table->Plx_I2C_LineSDA = uc_sda; + + return 0; +} + + +static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + + +static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; + + return 0; +} + +static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (fiji_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else { + smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; + smu_data->power_tune_table.LPMLTemperatureMin = + (uint8_t)((temp >> 16) & 0xff); + smu_data->power_tune_table.LPMLTemperatureMax = + (uint8_t)((temp >> 8) & 0xff); + smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); + } + return 0; +} + +static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + 0 == hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + uint32_t pm_fuse_table_offset; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW6 */ + if (fiji_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (fiji_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 */ + if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != fiji_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW14 */ + if (fiji_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + /* DW15-DW18 */ + if (fiji_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW19 */ + if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW20 */ + if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +/** +* Preparation of vddc and vddgfx CAC tables for SMC. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + /* tables is already swapped, so in order to use the value from it, + * we need to swap it back. + * We are populating vddc CAC data to BapmVddc table + * in split and merged mode + */ + + for (count = 0; count < lookup_table->count; count++) { + index = phm_get_voltage_index(lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_high); + } + + return 0; +} + +/** +* Preparation of voltage tables for SMC. +* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ + +static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result; + + result = fiji_populate_cac_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate CAC voltage tables to SMC", + return -EINVAL); + + return 0; +} + +static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_Ulv *state) +{ + int result = 0; + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = 1; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + } + return result; +} + +static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + return fiji_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + + +/** +* Calculates the SCLK dividers using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ +static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t ref_clock; + uint32_t ref_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. 
*/ + ref_clock = atomctrl_get_reference_clock(hwmgr); + ref_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider */ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup */ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + struct pp_atomctrl_internal_ss_info ssInfo; + + uint32_t vco_freq = clock * dividers.uc_pll_post_div; + if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, + vco_freq, &ssInfo)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + * + * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 + */ + uint32_t clk_s = ref_clock * 5 / + (ref_divider * ssInfo.speed_spectrum_rate); + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * + fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, + CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); + } + } + + sclk->SclkFrequency = clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +/** +* Populates single SMC SCLK structure using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ + +static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU73_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t threshold, mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + result = fiji_calculate_sclk_params(hwmgr, clock, level); + + /* populate graphics levels */ + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + (uint32_t *)(&level->MinVoltage), &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + + level->SclkFrequency = clock; + level->ActivityLevel = sclk_al_threshold; + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->UpHyst = 10; + level->DownHyst = 0; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + + threshold = clock * data->fast_watermark_threshold / 100; + + data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + + if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, + hwmgr->display_config.min_core_set_clock_in_sr); + + + /* Default to slow, highest DPM level will be + * set to PPSMC_DISPLAY_WATERMARK_LOW later. + */ + level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + + return 0; +} +/** +* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states +* +* @param hwmgr the address of the hardware manager +*/ +int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * + SMU73_MAX_LEVELS_GRAPHICS; + struct SMU73_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t hightest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = fiji_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &levels[i]); + if (result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now.*/ + levels[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + levels[dpm_table->sclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? 
i : max_entry); + } else { + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (hightest_pcie_level_enabled + 1))) != 0)) + hightest_pcie_level_enabled++; + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0)) + lowest_pcie_level_enabled++; + + while ((count < hightest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < + hightest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : + hightest_pcie_level_enabled; + + /* set pcieDpmLevel to hightest_pcie_level_enabled */ + for (i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = hightest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change */ + result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + + +/** + * MCLK Frequency Ratio + * SEQ_CG_RESP Bit[31:24] - 0x0 + * Bit[27:24] \96 DDR3 Frequency ratio + * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz + * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz + * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz + * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz + * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz + * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz + * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz + * 400 < 0x7 <= 450MHz, 800 < 0xF + */ +static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) +{ + if (mem_clock <= 10000) + return 0x0; + if (mem_clock <= 15000) + return 0x1; + if (mem_clock <= 20000) + return 0x2; + if (mem_clock <= 25000) + return 0x3; + if (mem_clock <= 30000) + return 0x4; + if (mem_clock <= 35000) + return 0x5; + if (mem_clock <= 40000) + return 0x6; + if (mem_clock <= 45000) + return 0x7; + if (mem_clock <= 50000) + return 0x8; + if (mem_clock <= 55000) + return 0x9; + if (mem_clock <= 60000) + return 0xa; + if (mem_clock <= 65000) + return 0xb; + if (mem_clock <= 70000) + return 0xc; + if (mem_clock <= 75000) + return 0xd; + if (mem_clock <= 80000) + return 0xe; + /* mem_clock > 800MHz */ + return 0xf; +} + +/** +* Populates the SMC MCLK structure using the provided memory clock +* +* @param hwmgr the address of the hardware manager +* @param clock the memory clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ +static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) +{ + struct pp_atomctrl_memory_clock_param mem_param; + int result; + + result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to get Memory PLL Dividers.", + ); + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = clock; + mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; + mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); + + return result; +} + +static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) +{ + struct smu7_hwmgr *data = (struct 
smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + uint32_t mclk_stutter_mode_threshold = 60000; + + if (table_info->vdd_dep_on_mclk) { + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory " + "VDDC voltage dependency table", return result); + } + + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->UpHyst = 0; + mem_level->DownHyst = 100; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + mem_level->StutterEnable = false; + + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + /* enable stutter mode if all the follow condition applied + * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, + * &(data->DisplayTiming.numExistingDisplays)); + */ + data->display_timing.num_existing_displays = 1; + + if (mclk_stutter_mode_threshold && + (clock <= mclk_stutter_mode_threshold) && + (!data->is_uvd_enabled) && + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + return result; +} + +/** +* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states +* +* @param hwmgr the address of the hardware manager +*/ +int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * + SMU73_MAX_LEVELS_MEMORY; + struct SMU73_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = fiji_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + if (result) + return result; + } + + /* Only enable level 0 for now. */ + levels[0].EnabledForActivity = 1; + + /* in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in + * a higher state by default such that we are not effected by + * up threshold or and MCLK DPM latency. 
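 + * Level 0 therefore gets its own activity target below
 + * (mclk_dpm0_activity_target) rather than the common mclk_activity_target
 + * used for the other levels.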
+ */ + levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; + CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high */ + levels[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change */ + result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + + +/** +* Populates the SMC MVDD structure using the provided memory clock. +* +* @param hwmgr the address of the hardware manager +* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. +* @param voltage the SMC VOLTAGE structure to be populated +*/ +static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} + +static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct pp_atomctrl_clock_dividers_vi dividers; + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (!data->sclk_dpm_key_disabled) { + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + table->ACPILevel.SclkFrequency = + data->dpm_table.sclk_table.dpm_levels[0].value; + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + table->ACPILevel.SclkFrequency, + (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " \ + "in Clock Dependency Table", + ); + } else { + table->ACPILevel.SclkFrequency = + data->vbios_boot_state.sclk_bootup_value; + table->ACPILevel.MinVoltage = + data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; + } + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, &dividers); + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + 
SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2, + SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + if (!data->mclk_dpm_key_disabled) { + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = + data->dpm_table.mclk_table.dpm_levels[0].value; + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value in Clock Dependency Table", + ); + } else { + table->MemoryACPILevel.MclkFrequency = + data->vbios_boot_state.mclk_bootup_value; + table->MemoryACPILevel.MinVoltage = + data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE; + } + + us_mvdd = 0; + if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + (data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!fiji_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = false; + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} + +static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; 
count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + table->VceLevel[count].MinVoltage |= + ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) * + VOLTAGE_SCALE) << VDDCI_SHIFT; + table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /*retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->AcpLevelCount = (uint8_t)(mm_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; + table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t)(mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].MinVoltage = 0; + table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + 
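+		/* convert to the SMC's (big-endian) byte order before the table is uploaded */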
CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burstTime; + ULONG state, trrds, trrdl; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); + + state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0); + trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0); + trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + arb_regs->TRRDS = (uint8_t)trrds; + arb_regs->TRRDL = (uint8_t)trrdl; + + return 0; +} + +static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct SMU73_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + int result = 0; + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = fiji_populate_memory_timing_parameters(hwmgr, + data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result) + break; + } + } + + if (!result) + result = fiji_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU73_Discrete_MCArbDramTimingTable), + SMC_RAM_END); + return result; +} + +static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + 
table->UvdLevel[count].DclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + + } + return result; +} + +static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_sclk->entries[level].clk >= + data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_mclk->entries[level].clk >= + data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, + volt_with_cks, value; + uint16_t clock_freq_u16; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, + volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
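	 * Illustrative example (hypothetical fuse values, not taken from hardware):
	 * with efuse2 == 1 and an efuse byte of 128,
	 * ro = (2300 - 1350) * 128 / 255 + 1350 = 1826 >= 1660, so the part is
	 * treated as FF (type = 0).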
+ */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (146 * 4)); + efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (148 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + efuse2 &= 0xF; + + if (efuse2 == 1) + ro = (2300 - 1350) * efuse / 255 + 1350; + else + ro = (2500 - 1000) * efuse / 255 + 1000; + + if (ro >= 1660) + type = 0; + else + type = 1; + + /* Populate Stretch amount */ + smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + volt_without_cks = (uint32_t)((14041 * + (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / + (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); + volt_with_cks = (uint32_t)((13946 * + (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / + (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + STRETCH_ENABLE, 0x0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + staticEnable, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x0); + + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFC2FF87; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = + fiji_clock_stretcher_lookup_table[stretch_amount2][0]; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = + fiji_clock_stretcher_lookup_table[stretch_amount2][1]; + clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table. + GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. + SclkFrequency) / 100); + if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < + clock_freq_u16 && + fiji_clock_stretcher_lookup_table[stretch_amount2][1] > + clock_freq_u16) { + /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ + value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; + /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ + value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; + /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ + value |= (fiji_clock_stretch_amount_conversion + [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] + [stretch_amount]) << 3; + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].minFreq); + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. 
+ CKS_LOOKUPTableEntry[0].maxFreq); + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = + fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= + (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + /* Populate DDT Lookup Table */ + for (i = 0; i < 4; i++) { + /* Assign the minimum and maximum VID stored + * in the last row of Clock Stretcher Voltage Table. + */ + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].minVID = + (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].maxVID = + (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; + /* Loop through each SCLK and check the frequency + * to see if it lies within the frequency for clock stretcher. + */ + for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) { + cks_setting = 0; + clock_freq = PP_SMC_TO_HOST_UL( + smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency); + /* Check the allowed frequency against the sclk level[j]. + * Sclk's endianness has already been converted, + * and it's in 10Khz unit, + * as opposed to Data table, which is in Mhz unit. + */ + if (clock_freq >= + (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { + cks_setting |= 0x2; + if (clock_freq < + (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) + cks_setting |= 0x1; + } + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table. + ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +/** +* Populates the SMC VRConfig field in DPM table. 
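+* Each rail gets a regulator-source code packed into VRConfig at its shift:
+* VDDGFX at VRCONF_VDDGFX_SHIFT, VDDC in the low bits, VDDCI at
+* VRCONF_VDDCI_SHIFT and MVDD at VRCONF_MVDD_SHIFT; in merged mode VDDGFX
+* shares the VDDC SVI2 plane.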
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!", + ); + } + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + +static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + uint32_t tmp; + int result; + + /* This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. + */ + result = fiji_read_smc_sram_dword(smumgr, + smu_data->arb_table_start, &tmp, SMC_RAM_END); + + if (result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return fiji_write_smc_sram_dword(smumgr, + smu_data->arb_table_start, tmp, SMC_RAM_END); +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data (PowerState) +* @return always 0 +*/ +int fiji_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + + fiji_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + fiji_populate_smc_voltage_tables(hwmgr, table); + + table->SystemFlags = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = fiji_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = fiji_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = fiji_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = fiji_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result); + + result = fiji_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = fiji_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = fiji_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result); + + result = fiji_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. 
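	 * fiji_program_memory_timing_parameters() below builds one MC ARB entry
	 * per (sclk, mclk) pair and uploads the whole table to SMC RAM in a
	 * single copy.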
+ */ + result = fiji_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = fiji_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + result = fiji_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = fiji_populate_smc_initailial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot State!", return result); + + result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = fiji_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ + table->PCIeGenInterval = 1; + table->VRConfig = 0; + + result = fiji_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, + &gpio_pin)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* For porlarity read GPIOPAD_A with assigned Gpio pin + * since VBIOS will program this register to set 'inactive state', + * driver can then determine 'active state' from this and + * program SMU with correct polarity + */ + table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 
1:0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = fiji_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), + SMC_RAM_END); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + result = fiji_init_arb_table_index(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload arb data to SMC memory!", return result); + + result = fiji_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + return 0; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (smu_data->fan_table_start == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
+ usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr-> + thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> + thermal_controller.advanceFanControlParameters.ulCycleDelay * + reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = fiji_copy_bytes_to_smc(hwmgr->smumgr, smu_data->fan_table_start, + (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), + SMC_RAM_END); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanMinPwm, + hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanSclkTarget, + hwmgr->thermal_controller. 
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + + if (res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + return 0; +} + +int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return fiji_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = fiji_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + result = fiji_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + return result; +} + +uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU73_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU73_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU73_SoftRegisters, VBlankTimeout); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU73_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("cant't get the offset of type %x member %x \n", type, member); + return 0; +} + +uint32_t fiji_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU73_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU73_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU73_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU73_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU73_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU73_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU73_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU73_MAX_LEVELS_MVDD; + } + + printk("cant't get the mac of %x \n", value); + return 0; +} + + +static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information 
*)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->dpm_table_start + offsetof(SMU73_Discrete_DpmTable, + UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + else + smu_data->smc_state_table.VceBootLevel = 0; + + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + + smu_data->smc_state_table.SamuBootLevel = 0; + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + +int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + fiji_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + fiji_update_vce_smc_table(hwmgr); + break; + case 
SMU_SAMU_TABLE: + fiji_update_samu_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + + +/** +* Get the location of various tables inside the FW image. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->dpm_table_start = tmp; + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) { + data->soft_regs_start = tmp; + smu_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->mc_reg_table_start = tmp; + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->fan_table_start = tmp; + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->arb_table_start = tmp; + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + + /* Program additional LP registers + * that are no longer programmed by VBIOS + */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + + return 0; +} + +bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h new file mode 100644 index 000000000000..d30d150f9ca6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h @@ -0,0 +1,51 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef FIJI_SMC_H +#define FIJI_SMC_H + +#include "smumgr.h" +#include "smu73.h" + +struct fiji_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; +}; + +int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int fiji_init_smc_table(struct pp_hwmgr *hwmgr); +int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t fiji_get_offsetof(uint32_t type, uint32_t member); +uint32_t fiji_get_mac_definition(uint32_t value); +int fiji_process_firmware_header(struct pp_hwmgr *hwmgr); +int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 8e52a2e82db5..82a8be4af63b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -38,6 +38,7 @@ #include "bif/bif_5_0_sh_mask.h" #include "pp_debug.h" #include "fiji_pwrvirus.h" +#include "fiji_smc.h" #define AVFS_EN_MSB 1568 #define AVFS_EN_LSB 1568 @@ -219,17 +220,28 @@ bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr) */ int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) { + int ret; + if (!fiji_is_smc_ram_running(smumgr)) return -1; - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) { - printk(KERN_ERR "Failed to send Previous Message."); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - } + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send pre message %x ret is %d \n", msg, ret); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send message %x ret is %d \n", msg, ret); + return 0; } @@ -840,7 +852,7 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ 
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, - PPSMC_MSG_VftTableIsValid), + 0x666), "[AVFS][fiji_avfs_event_mgr] SMU did not respond " "correctly to VftTableIsValid Msg", return -1;); @@ -964,6 +976,7 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) { struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); uint64_t mc_addr; + int i; priv->header_buffer.data_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; @@ -1001,6 +1014,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) priv->acpi_optimization = 1; + for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) + priv->activity_target[i] = 30; + return 0; } @@ -1030,6 +1046,17 @@ static const struct pp_smumgr_func fiji_smu_funcs = { .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .update_smc_table = fiji_update_smc_table, + .get_offsetof = fiji_get_offsetof, + .process_firmware_header = fiji_process_firmware_header, + .init_smc_table = fiji_init_smc_table, + .update_sclk_threshold = fiji_update_sclk_threshold, + .thermal_setup_fan_table = fiji_thermal_setup_fan_table, + .populate_all_graphic_levels = fiji_populate_all_graphic_levels, + .populate_all_memory_levels = fiji_populate_all_memory_levels, + .get_mac_definition = fiji_get_mac_definition, + .initialize_mc_reg_table = fiji_initialize_mc_reg_table, + .is_dpm_running = fiji_is_dpm_running, }; int fiji_smum_init(struct pp_smumgr *smumgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h index b4eb483215b1..291f7042a585 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h @@ -23,6 +23,10 @@ #ifndef _FIJI_SMUMANAGER_H_ #define _FIJI_SMUMANAGER_H_ +#include "smu73_discrete.h" +#include + +#define SMC_RAM_END 0x40000 struct fiji_smu_avfs { enum AVFS_BTC_STATUS AvfsBtcStatus; @@ -40,11 +44,22 @@ struct fiji_buffer_entry { struct fiji_smumgr { uint8_t *header; uint8_t *mec_image; - uint32_t soft_regs_start; + + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; struct fiji_smu_avfs avfs; uint32_t acpi_optimization; - struct fiji_buffer_entry header_buffer; + + struct SMU73_Discrete_DpmTable smc_state_table; + struct SMU73_Discrete_Ulv ulv_setting; + struct SMU73_Discrete_PmFuses power_tune_table; + const struct fiji_pt_defaults *power_tune_defaults; + uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS]; + }; int fiji_smum_init(struct pp_smumgr *smumgr); From b859c2070354ad8a2512e1524533d9f3291174c7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 16 Aug 2016 20:00:49 +0800 Subject: [PATCH 12/49] drm/amd/powerplay: use smu7 hwmgr to manager fiji Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 3 +-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 28 ++++++++++++++++++-- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index 95a3cf10b111..dffcd8af881e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -7,10 +7,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ cz_clockpowergating.o tonga_powertune.o\ process_pptables_v1_0.o ppatomctrl.o \ 
tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ - fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ - fiji_clockpowergating.o fiji_thermal.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ smu7_clockpowergating.o iceland_hwmgr.o \ + tonga_clockpowergating.o \ iceland_clockpowergating.o iceland_thermal.o \ iceland_powertune.o diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 78ccbecd7591..449780cf140d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -37,12 +37,12 @@ extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); +static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr); uint8_t convert_to_vid(uint16_t vddc) { @@ -84,8 +84,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) case CHIP_TONGA: tonga_hwmgr_init(hwmgr); break; + case CHIP_FIJI: - fiji_hwmgr_init(hwmgr); + smu7_hwmgr_init(hwmgr); + fiji_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | + PP_VBI_TIME_SUPPORT_MASK | + PP_ENABLE_GFX_CG_THRU_SMU); break; case CHIP_POLARIS11: case CHIP_POLARIS10: @@ -744,3 +749,22 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) return 0; } +int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + return 0; +} + From 1486022088dea351805e4db4fc76c4d7c68733d6 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 11 Aug 2016 11:01:01 +0800 Subject: [PATCH 13/49] drm/amd/powerplay: implement fw image related smum interface for tonga. 
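This moves the Tonga SMC table programming (graphics/memory DPM levels, voltage, fan and ARB tables) into tonga_smc.c and exposes it through the common smumgr callback table, mirroring what the preceding patches did for Fiji. As a rough stand-alone sketch of the callback-table dispatch this relies on (the type and function names below are simplified stand-ins for illustration only, not the driver's real declarations):

	/* sketch.c -- illustrative only, not part of the patch */
	#include <stdio.h>

	struct pp_hwmgr;	/* opaque for the purpose of this sketch */

	/* stand-in for the relevant part of struct pp_smumgr_func */
	struct smu_funcs {
		int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
		int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
	};

	static int tonga_populate_all_graphic_levels_stub(struct pp_hwmgr *hwmgr)
	{
		printf("tonga: writing GraphicsLevel array to SMC\n");
		return 0;
	}

	static int tonga_populate_all_memory_levels_stub(struct pp_hwmgr *hwmgr)
	{
		printf("tonga: writing MemoryLevel array to SMC\n");
		return 0;
	}

	static const struct smu_funcs tonga_smu_funcs_stub = {
		.populate_all_graphic_levels = tonga_populate_all_graphic_levels_stub,
		.populate_all_memory_levels = tonga_populate_all_memory_levels_stub,
	};

	int main(void)
	{
		const struct smu_funcs *funcs = &tonga_smu_funcs_stub;

		/* common code only reaches the ASIC code through the table */
		if (funcs->populate_all_graphic_levels(NULL))
			return 1;
		return funcs->populate_all_memory_levels(NULL);
	}

With the callbacks in place, the shared smu7 hwmgr only reaches the ASIC-specific code through these pointers, so Tonga-specific knowledge stays in tonga_smc.c/tonga_smumgr.c.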
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- .../gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 3214 +++++++++++++++++ .../gpu/drm/amd/powerplay/smumgr/tonga_smc.h | 60 + .../drm/amd/powerplay/smumgr/tonga_smumgr.c | 17 + .../drm/amd/powerplay/smumgr/tonga_smumgr.h | 37 +- 6 files changed, 3329 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 42783bf7647c..3110bf0eeacc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -800,7 +800,7 @@ static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) if (0 == result) { data->soft_regs_start = tmp; - tonga_smu->ulSoftRegsStart = tmp; + tonga_smu->soft_regs_start = tmp; } error |= (0 != result); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 7561239eb874..2ff4aa031b1a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -3,7 +3,7 @@ # It provides the smu management services for the driver. SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o\ - polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o + polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c new file mode 100644 index 000000000000..3acdbffed88c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -0,0 +1,3214 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * + */ + +#include "tonga_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "tonga_smumgr.h" +#include "pppcielanes.h" +#include "pp_endian.h" +#include "smu7_ppsmc.h" + +#include "smu72_discrete.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define MC_CG_ARB_FREQ_F1 0x0b +#define VDDC_VDDCI_DELTA 200 + + +static struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { +/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + {1, 0xF, 0xFD, 0x19, + 5, 45, 0, 0xB0000, + {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, + 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, + 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4} + }, +}; + +/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */ +static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = { + {600, 1050, 3, 0}, + {600, 1050, 6, 1} +}; + +/* [FF, SS] type, [] 4 voltage ranges, + * and [Floor Freq, Boundary Freq, VID min , VID max] + */ +static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = { + { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, + { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } +}; + +/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */ +static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = { + {0, 1, 3, 2, 4, 5}, + {0, 2, 4, 5, 6, 5} +}; + +/* PPGen has the gain setting generated in x * 100 unit + * This function is to convert the unit to x * 4096(0x1000) unit. 
+ * This is the unit expected by SMC firmware + */ + + +static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + voltage->VddGfx = phm_get_voltage_index( + pptable_info->vddgfx_lookup_table, + allowed_clock_voltage_table->entries[i].vddgfx); + voltage->Vddc = phm_get_voltage_index( + pptable_info->vddc_lookup_table, + allowed_clock_voltage_table->entries[i].vddc); + + if (allowed_clock_voltage_table->entries[i].vddci) + voltage->Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci); + else + voltage->Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA); + + + if (allowed_clock_voltage_table->entries[i].mvdd) + *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd; + + voltage->Phases = 1; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + allowed_clock_voltage_table->entries[i-1].vddgfx); + voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table, + allowed_clock_voltage_table->entries[i-1].vddc); + + if (allowed_clock_voltage_table->entries[i-1].vddci) + voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table, + allowed_clock_voltage_table->entries[i-1].vddci); + + if (allowed_clock_voltage_table->entries[i-1].mvdd) + *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd; + + return 0; +} + + +/** + * Vddc table preparation for SMC. + * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + unsigned int count; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + table->VddcTable[count] = + PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + } + return 0; +} + +/** + * VddGfx table preparation for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + unsigned int count; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { + table->VddGfxLevelCount = data->vddgfx_voltage_table.count; + for (count = 0; count < data->vddgfx_voltage_table.count; count++) { + table->VddGfxTable[count] = + PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); + } + return 0; +} + +/** + * Vddci table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + + table->VddciLevelCount = data->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + table->VddciTable[count] = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + table->SmioTable1.Pattern[count].Voltage = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */ + table->SmioTable1.Pattern[count].Smio = + (uint8_t) count; + table->Smio[count] |= + data->vddci_voltage_table.entries[count].smio_low; + table->VddciTable[count] = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + } + + table->SmioMask1 = data->vddci_voltage_table.mask_low; + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + table->MvddLevelCount = data->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + table->SmioTable2.Pattern[count].Voltage = + PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ + table->SmioTable2.Pattern[count].Smio = + (uint8_t) count; + table->Smio[count] |= + data->mvdd_voltage_table.entries[count].smio_low; + } + table->SmioMask2 = data->mvdd_voltage_table.mask_low; + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + } + + return 0; +} + +/** + * Preparation of vddc and vddgfx CAC tables for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = + pptable_info->vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = + pptable_info->vddc_lookup_table; + + /* table is already swapped, so in order to use the value from it + * we need to swap it back. + */ + uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount); + uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount); + + for (count = 0; count < vddc_level_count; count++) { + /* We are populating vddc CAC data to BapmVddc table in split and merged mode */ + index = phm_get_voltage_index(vddc_lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); + table->BapmVddcVidHiSidd2[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); + } + + if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) { + /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */ + for (count = 0; count < vddgfx_level_count; count++) { + index = phm_get_voltage_index(vddgfx_lookup_table, + data->vddgfx_voltage_table.entries[count].value); + table->BapmVddGfxVidLoSidd[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low); + table->BapmVddGfxVidHiSidd[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid); + table->BapmVddGfxVidHiSidd2[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high); + } + } else { + for (count = 0; count < vddc_level_count; count++) { + index = phm_get_voltage_index(vddc_lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddGfxVidLoSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); + table->BapmVddGfxVidHiSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); + table->BapmVddGfxVidHiSidd2[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); + } + } + + return 0; +} + +/** + * Preparation of voltage tables for SMC.
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ + +static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result; + + result = tonga_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", + return -EINVAL); + + result = tonga_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", + return -EINVAL); + + result = tonga_populate_smc_vdd_gfx_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDGFX voltage table to SMC", + return -EINVAL); + + result = tonga_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", + return -EINVAL); + + result = tonga_populate_cac_tables(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate CAC voltage tables to SMC", + return -EINVAL); + + return 0; +} + +static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU72_Discrete_Ulv *state) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU72_Discrete_DpmTable *table) +{ + return tonga_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +/** + * Calculates the SCLK dividers using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t reference_clock; + uint32_t reference_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ + reference_clock = atomctrl_get_reference_clock(hwmgr); + + reference_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider*/ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup*/ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + + uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; + if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + */ + /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ + uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); + + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); + + cg_spll_spread_spectrum = +
PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = + PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint16_t sclk_activity_level_threshold, + SMU72_Discrete_GraphicsLevel *graphic_level) +{ + int result; + uint32_t mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + /* populate graphics levels*/ + result = tonga_get_dependecy_volt_by_clk(hwmgr, + pptable_info->vdd_dep_on_sclk, engine_clock, + &graphic_level->MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for VDDC " + "engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + /* Indicates maximum activity level for this performance level. 
50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 0; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + data->display_timing.min_clock_in_sr = + hwmgr->display_config.min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + graphic_level->DeepSleepDivId = + smu7_get_sleep_divider_id_from_clock(engine_clock, + data->display_timing.min_clock_in_sr); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/ + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; + uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count; + uint32_t level_array_address = smu_data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) * + SMU72_MAX_LEVELS_GRAPHICS; + + SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; + + uint32_t i, max_entry; + uint8_t highest_pcie_level_enabled = 0; + uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0; + uint8_t count = 0; + int result = 0; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = tonga_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result != 0) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now. 
*/ + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + if (dpm_table->sclk_table.count > 1) + smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((pcie_entry_count >= 1), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/ + for (i = 0; i < dpm_table->sclk_table.count; i++) { + smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? i : max_entry); + } + } else { + if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask) + printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !"); + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1<<(highest_pcie_level_enabled+1))) != 0)) { + highest_pcie_level_enabled++; + } + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1<<lowest_pcie_level_enabled)) == 0)) { + lowest_pcie_level_enabled++; + } + + while ((count < highest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1<<(lowest_pcie_level_enabled+1+count))) == 0)) { + count++; + } + mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? + (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled; + + + /* set pcieDpmLevel to highest_pcie_level_enabled*/ + for (i = 2; i < dpm_table->sclk_table.count; i++) + smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change*/ + result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, + (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU72_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = 
atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE( + 0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", + return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, + mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, + mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, + mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, + mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, + mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, + mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, + mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. 
reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss.Info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + + return mc_para_index; +} + +static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + + return mc_para_index; +} + + +static int tonga_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU72_Discrete_MemoryLevel *memory_level + ) +{ + uint32_t mvdd = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_stutter_mode_threshold = 30000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (NULL != pptable_info->vdd_dep_on_mclk) { + result = tonga_get_dependecy_volt_by_clk(hwmgr, + pptable_info->vdd_dep_on_mclk, + memory_clock, + &memory_level->MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE( + 0 == result, + "can not find MinVddc voltage value from memory VDDC " + 
"voltage dependency table", + return result); + } + + if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) + memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value; + else + memory_level->MinMvdd = mvdd; + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 0; + memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + if ((mclk_stutter_mode_threshold != 0) && + (memory_clock <= mclk_stutter_mode_threshold) && + (!data->is_uvd_enabled) + && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) + && (data->display_timing.num_existing_displays <= 2) + && (data->display_timing.num_existing_displays != 0)) + memory_level->StutterEnable = 1; + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } else { + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } + + } else { + dll_state_on = data->dll_default_on; + } + } else { + memory_level->StrobeRatio = + tonga_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = tonga_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + + /* populate MCLK dpm table to SMU7 */ + uint32_t level_array_address = + smu_data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = + sizeof(SMU72_Discrete_MemoryLevel) * + SMU72_MAX_LEVELS_MEMORY; + SMU72_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = tonga_populate_single_memory_level( + hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (result) + return result; + } + + /* Only enable level 0 for now.*/ + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in a higher state + * by default such that we are not effected by up threshold or and MCLK DPM latency. 
+ */ + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change*/ + result = tonga_copy_bytes_to_smc(hwmgr->smumgr, + level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + + return result; +} + +static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pattern) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + smio_pattern->Voltage = + data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else { + return -EINVAL; + } + + return 0; +} + + +static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + + SMIO_Pattern voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + table->ACPILevel.MinVoltage = + smu_data->smc_state_table.GraphicsLevel[0].MinVoltage; + + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2, + SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = 
data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ + table->MemoryACPILevel.MinVoltage = + smu_data->smc_state_table.MemoryLevel[0].MinVoltage; + + /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ + + if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 
0; + + return result; +} + +static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->UvdLevelCount = (uint8_t) (mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->UvdLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? + phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->UvdLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->UvdLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi( + hwmgr, + table->UvdLevel[count].VclkFrequency, + ÷rs); + + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", + return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", + return result); + + table->UvdLevel[count].DclkDivider = + (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + } + + return result; + +} + +static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->VceLevelCount = (uint8_t) (mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = + mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->VceLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? 
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->VceLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->VceLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + } + + return result; +} + +static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->AcpLevelCount = (uint8_t) (mm_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = + pptable_info->mm_dep_table->entries[count].aclk; + table->AcpLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->AcpLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? + phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->AcpLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->AcpLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + } + + return result; +} + +static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t) (mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].Frequency = + pptable_info->mm_dep_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->SamuLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? 
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->SamuLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->SamuLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + } + + return result; +} + +static int tonga_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + int result = 0; + SMU72_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = tonga_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (result) + break; + } + } + + if (0 == result) { + result = tonga_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU72_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + printk(KERN_ERR "[powerplay] VBIOS did not find boot engine " + "clock value in dependency table. 
" + "Using Graphics DPM level 0 !"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.MemoryBootLevel = 0; + printk(KERN_ERR "[powerplay] VBIOS did not find boot " + "engine clock value in dependency table." + "Using Memory DPM level 0 !"); + result = 0; + } + + table->BootVoltage.Vddc = + phm_get_voltage_id(&(data->vddc_voltage_table), + data->vbios_boot_state.vddc_bootup_value); + table->BootVoltage.VddGfx = + phm_get_voltage_id(&(data->vddgfx_voltage_table), + data->vbios_boot_state.vddgfx_bootup_value); + table->BootVoltage.Vddci = + phm_get_voltage_id(&(data->vddci_voltage_table), + data->vbios_boot_state.vddci_bootup_value); + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return result; +} + + +static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, + volt_with_cks, value; + uint16_t clock_freq_u16; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, + volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + uint32_t hw_revision, dev_id; + struct cgs_system_info sys_info = {0}; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + sys_info.size = sizeof(struct cgs_system_info); + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(hwmgr->device, &sys_info); + hw_revision = (uint32_t)sys_info.value; + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
+ */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (146 * 4)); + efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (148 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + efuse2 &= 0xF; + + if (efuse2 == 1) + ro = (2300 - 1350) * efuse / 255 + 1350; + else + ro = (2500 - 1000) * efuse / 255 + 1000; + + if (ro >= 1660) + type = 0; + else + type = 1; + + /* Populate Stretch amount */ + smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + if (ASICID_IS_TONGA_P(dev_id, hw_revision)) { + volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 * + (sclk_table->entries[i].clk/100) / 10000) * 1000 / + (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000))); + volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 * + (sclk_table->entries[i].clk/100) / 100000) * 1000 / + (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000))); + } else { + volt_without_cks = (uint32_t)((14041 * + (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / + (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); + volt_with_cks = (uint32_t)((13946 * + (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / + (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + } + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + STRETCH_ENABLE, 0x0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + staticEnable, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x0); + + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFC2FF87; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = + tonga_clock_stretcher_lookup_table[stretch_amount2][0]; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = + tonga_clock_stretcher_lookup_table[stretch_amount2][1]; + clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table. + GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. + SclkFrequency) / 100); + if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] < + clock_freq_u16 && + tonga_clock_stretcher_lookup_table[stretch_amount2][1] > + clock_freq_u16) { + /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ + value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; + /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ + value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; + /* Program PWR_CKS_CNTL. 
CKS_STRETCH_AMOUNT */ + value |= (tonga_clock_stretch_amount_conversion + [tonga_clock_stretcher_lookup_table[stretch_amount2][3]] + [stretch_amount]) << 3; + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].minFreq); + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].maxFreq); + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = + tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= + (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + /* Populate DDT Lookup Table */ + for (i = 0; i < 4; i++) { + /* Assign the minimum and maximum VID stored + * in the last row of Clock Stretcher Voltage Table. + */ + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].minVID = + (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2]; + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].maxVID = + (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3]; + /* Loop through each SCLK and check the frequency + * to see if it lies within the frequency for clock stretcher. + */ + for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) { + cks_setting = 0; + clock_freq = PP_SMC_TO_HOST_UL( + smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency); + /* Check the allowed frequency against the sclk level[j]. + * Sclk's endianness has already been converted, + * and it's in 10Khz unit, + * as opposed to Data table, which is in Mhz unit. + */ + if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) { + cks_setting |= 0x2; + if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100) + cks_setting |= 0x1; + } + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table. + ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + return 0; +} + +/** + * Populates the SMC VRConfig field in DPM table. 
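+ * (Rough sketch of the packing, assuming the VRCONF_*_SHIFT field offsets
+ * from the SMU72 discrete headers: each rail's regulator choice is OR'ed
+ * into its own field, so a split-plane part ends up with approximately
+ * (VR_SVI2_PLANE_1 << VRCONF_VDDGFX_SHIFT) | VR_SVI2_PLANE_2 for
+ * VDDGFX/VDDC, plus the VDDCI and MVDD fields filled in below.)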
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint16_t config; + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { + /* Splitted mode */ + config = VR_SVI2_PLANE_1; + table->VRConfig |= (config<voltage_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= config; + } else { + printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should " + "be both on SVI2 control in splitted mode !\n"); + } + } else { + /* Merged mode */ + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config<voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + printk(KERN_ERR "[ powerplay ] VDDC should be on " + "SVI2 control in merged mode !\n"); + } + } + + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config<vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config<mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig |= (config<backend); + uint32_t tmp; + int result; + + /* + * This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. + */ + result = tonga_read_smc_sram_dword(smumgr, + smu_data->arb_table_start, &tmp, SMC_RAM_END); + + if (0 != result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return tonga_write_smc_sram_dword(smumgr, + smu_data->arb_table_start, tmp, SMC_RAM_END); +} + + +static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + int i, j, k; + uint16_t *pdef1; + uint16_t *pdef2; + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range !", + ); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + dpm_table->BAPM_TEMP_GRADIENT = + PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + pdef1 = defaults->bapmti_r; + pdef2 = defaults->bapmti_rc; + + for (i = 0; i < SMU72_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU72_DTE_SOURCES; j++) { + for (k = 0; k < SMU72_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = + PP_HOST_TO_SMC_US(*pdef1); + dpm_table->BAPMTI_RC[i][j][k] = + PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr 
*smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (tonga_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 " + "(SviLoadLineEn) from SMC Failed !", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0)) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed !", + return -EINVAL); + + /* DW6 */ + if (tonga_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed !", + return -EINVAL); + /* DW7 */ + if (tonga_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed !", + return -EINVAL); + /* DW8 */ + if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl Failed !", + return -EINVAL); + + /* DW9-DW12 */ + if (tonga_populate_temperature_scaler(hwmgr) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed !", + return -EINVAL); + + /* DW13-DW14 */ + if (tonga_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan " + "Control parameters Failed !", + return -EINVAL); + + /* DW15-DW18 */ + if (tonga_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed !", + return -EINVAL); + + /* DW19 */ + if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML " + "Min and Max Vid Failed !", + return -EINVAL); + + /* DW20 */ + if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE( + false, + "Attempt to populate BapmVddCBaseLeakage " + "Hi and Lo Sidd Failed !", + return -EINVAL); + + if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed !", + return -EINVAL); + } + return 0; +} + +static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr, + SMU72_Discrete_MCRegisters *mc_reg_table) +{ + const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend; + + uint32_t i, j; + + for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { + if (smu_data->mc_reg_table.validflag & 1<address[] array " + "out of boundary", + 
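+ /* validflag carries one bit per MC register: tonga_set_valid_flag()
+ * later in this file sets bit j when register j's value differs between
+ * AC timing entries, so only registers that actually change with memory
+ * clock are copied into the SMC address list here. */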
return -EINVAL); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/*convert register values from driver to SMC format */ +static void tonga_convert_mc_registers( + const struct tonga_mc_reg_entry *entry, + SMU72_Discrete_MCRegisterSet *data, + uint32_t num_entries, uint32_t valid_flag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & 1<value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); + i++; + } + } +} + +static int tonga_convert_mc_reg_table_entry_to_smc( + struct pp_smumgr *smumgr, + const uint32_t memory_clock, + SMU72_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + uint32_t i = 0; + + for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { + if (memory_clock <= + smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) + --i; + + tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, smu_data->mc_reg_table.last, + smu_data->mc_reg_table.validflag); + + return 0; +} + +static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU72_Discrete_MCRegisters *mc_regs) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = tonga_convert_mc_reg_table_entry_to_smc( + hwmgr->smumgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_regs->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters)); + + result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); + + if (result != 0) + return result; + + + address = smu_data->mc_reg_table_start + + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); + + return tonga_copy_bytes_to_smc( + hwmgr->smumgr, address, + (uint8_t *)&smu_data->mc_regs.data[0], + sizeof(SMU72_Discrete_MCRegisterSet) * + data->dpm_table.mclk_table.count, + SMC_RAM_END); +} + +static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + + memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); + result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses !", + return result;); + + result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state !", + return result;); + + return tonga_copy_bytes_to_smc(smumgr, smu_data->mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END); +} + +static void 
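+/* The power-tune defaults are selected by the pptable's
+ * usPowerTuneDataSetID, a 1-based index into tonga_power_tune_data_set_array;
+ * an id of 0, a missing pptable or an id above POWERTUNE_DEFAULT_SET_MAX
+ * falls back to the first entry. */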
tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &tonga_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0]; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int tonga_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + uint8_t i; + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + + + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + tonga_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + tonga_populate_smc_voltage_tables(hwmgr, table); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN); + + if (i == 1 || i == 0) + table->SystemFlags |= 0x40; + + if (data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = tonga_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state !", + return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = tonga_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level !", return result); + + result = tonga_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level !", return result); + + result = tonga_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level !", return result); + + result = tonga_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level !", return result); + + result = tonga_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level !", return result); + + result = tonga_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level !", return result); + + result = tonga_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level !", return result); + + /* Since only the initial state is completely set up at this + * point (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. 
+ */ + result = tonga_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", + return result;); + + result = tonga_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level !", return result); + + result = tonga_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level !", return result); + + tonga_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate BAPM Parameters !", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = tonga_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table !", + return result;); + } + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + /* + * Cail reads current link status and reports it as cap (we cannot + * change this due to some previous issues we had) + * SMC drops the link status to lowest level after enabling + * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again + * but this time Cail reads current link status which was set to low by + * SMC and reports it as cap to powerplay + * To avoid it, we set PCIeBootLinkLevel to highest dpm level + */ + PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); + + table->PCIeGenInterval = 1; + + result = tonga_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting !", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment)) { + table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment)) { + table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + + if (0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + } + + if 
(atomctrl_get_pp_assign_pin(hwmgr, + THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + + table->ThermOutPolarity = + (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0; + + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)){ + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } + } else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = tonga_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory !", return result;); + + result = tonga_init_arb_table_index(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload arb data to SMC memory !", return result); + + tonga_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize pm fuses !", return result); + + result = tonga_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize MC Reg table !", return result); + + return 0; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. 
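+* Note: the "+ 50" terms ahead of the "/ 100" divisions below (the
+* slope1/slope2 calculations and the TempMin/TempMed/TempMax conversions,
+* which appear to scale hundredths of a degree down to whole degrees)
+* round the integer results to nearest rather than truncating.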
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == smu_data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + fan_table.FanControl_GL_Flag = 1; + + res = tonga_copy_bytes_to_smc(hwmgr->smumgr, smu_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); +/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
+ if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); + + if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); + + if (0 != res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); +*/ + return 0; +} + + +static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return tonga_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = tonga_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = tonga_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((0 == result), + "Failed to upload MC reg table !", + return result); + + result = tonga_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters !", + ); + + return result; +} + +uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU72_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU72_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU72_SoftRegisters, VBlankTimeout); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU72_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("cant't get the offset of type %x member %x\n", type, member); + return 0; +} + +uint32_t tonga_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU72_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU72_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return 
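+ /* Together with tonga_get_offsetof() above, this lookup lets the common
+ * smu7 code (wired up through the pp_smumgr_func table further down)
+ * query SMU72-specific limits and structure offsets without including
+ * the Tonga firmware headers directly. */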
SMU72_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU72_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU72_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU72_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU72_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU72_MAX_LEVELS_MVDD; + } + printk("cant't get the mac value %x\n", value); + + return 0; +} + + +static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + smu_data->smc_state_table.SamuBootLevel = 0; + mm_boot_level_offset = smu_data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + 
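+ /* Each boot-level byte lives inside a packed SMC dword: the offset is
+ * rounded down to a 4-byte boundary (/4, *4) and only the byte lane for
+ * this engine is rewritten - byte 3 for UVD (mask 0x00FFFFFF), byte 2
+ * for VCE (0xFF00FFFF) and byte 0 for SAMU (0xFFFFFF00). */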
CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + +int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + tonga_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + tonga_update_vce_smc_table(hwmgr); + break; + case SMU_SAMU_TABLE: + tonga_update_samu_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + uint32_t tmp; + int result; + bool error = false; + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->dpm_table_start = tmp; + + error |= (0 != result); + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (0 == result) { + data->soft_regs_start = tmp; + smu_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->mc_reg_table_start = tmp; + + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->fan_table_start = tmp; + + error |= (0 != result); + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->arb_table_start = tmp; + + error |= (0 != result); + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (0 == result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? 
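+ /* 'error' accumulates the status of the individual reads above, so a
+ * nonzero return means at least one required offset (DPM table, soft
+ * registers, fan table, ARB table or SMC version) could not be read
+ * from the firmware header. */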
1 : 0; +} + +/*---------------------------MC----------------------------*/ + +static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, + &address) ? + address : + table->mc_reg_address[i].s1; + } + return 0; +} + +static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct tonga_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to + * mmMC_PMG_CMD_EMRS /_LP[15:0]. Bit[15:0] MRS, need to be update + * mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to + * mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. 
need to set these data for each clock range + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, + struct tonga_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + switch (table->mc_reg_address[i].s1) { + + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, + mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + if (!data->is_memory_gddr5) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + j++; + PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static int tonga_set_valid_flag(struct tonga_mc_reg_table *table) +{ + uint8_t i, j; + + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1<smumgr->backend); + pp_atomctrl_mc_reg_table *table; + struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table; + uint8_t module_index = tonga_get_memory_modile_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, + cgs_read_register(hwmgr->device, 
mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = tonga_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + tonga_set_s0_mc_reg_index(ni_table); + result = tonga_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + tonga_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h new file mode 100644 index 000000000000..8ae169ff541d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h @@ -0,0 +1,60 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _TONGA_SMC_H +#define _TONGA_SMC_H + +#include "smumgr.h" +#include "smu72.h" + + +#define ASICID_IS_TONGA_P(wDID, bRID) \ + (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \ + || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1)))) + + +struct tonga_pt_defaults { + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddC; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; + uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; +}; + +int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int tonga_init_smc_table(struct pp_hwmgr *hwmgr); +int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t tonga_get_offsetof(uint32_t type, uint32_t member); +uint32_t tonga_get_mac_definition(uint32_t value); +int tonga_process_firmware_header(struct pp_hwmgr *hwmgr); +int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr); +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index f42c536b3af1..b543d6c0f96a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -33,6 +33,7 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" #include "cgs_common.h" +#include "tonga_smc.h" #define TONGA_SMC_SIZE 0x20000 #define BUFFER_SIZE 80000 @@ -749,6 +750,8 @@ static int tonga_smu_init(struct pp_smumgr *smumgr) struct tonga_smumgr *tonga_smu; uint8_t *internal_buf; uint64_t mc_addr = 0; + int i; + /* Allocate memory for backend private data */ tonga_smu = (struct tonga_smumgr *)(smumgr->backend); tonga_smu->header_buffer.data_size = @@ -793,6 +796,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr) (cgs_handle_t)tonga_smu->smu_buffer.handle); return -1;); + for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) + tonga_smu->activity_target[i] = 30; + return 0; } @@ -807,6 +813,17 @@ static const struct pp_smumgr_func tonga_smu_funcs = { 
.send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .update_smc_table = tonga_update_smc_table, + .get_offsetof = tonga_get_offsetof, + .process_firmware_header = tonga_process_firmware_header, + .init_smc_table = tonga_init_smc_table, + .update_sclk_threshold = tonga_update_sclk_threshold, + .thermal_setup_fan_table = tonga_thermal_setup_fan_table, + .populate_all_graphic_levels = tonga_populate_all_graphic_levels, + .populate_all_memory_levels = tonga_populate_all_memory_levels, + .get_mac_definition = tonga_get_mac_definition, + .initialize_mc_reg_table = tonga_initialize_mc_reg_table, + .is_dpm_running = tonga_is_dpm_running, }; int tonga_smum_init(struct pp_smumgr *smumgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h index 33c788d7f05c..b2ad232506c1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h @@ -24,6 +24,10 @@ #ifndef _TONGA_SMUMGR_H_ #define _TONGA_SMUMGR_H_ +#include "smu72_discrete.h" + +#define SMC_RAM_END 0x40000 + struct tonga_buffer_entry { uint32_t data_size; uint32_t mc_addr_low; @@ -32,13 +36,44 @@ struct tonga_buffer_entry { unsigned long handle; }; + +struct tonga_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct tonga_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/ + struct tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + + struct tonga_smumgr { uint8_t *pHeader; uint8_t *pMecImage; - uint32_t ulSoftRegsStart; + + + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; struct tonga_buffer_entry header_buffer; struct tonga_buffer_entry smu_buffer; + + struct SMU72_Discrete_DpmTable smc_state_table; + struct SMU72_Discrete_Ulv ulv_setting; + struct SMU72_Discrete_PmFuses power_tune_table; + struct tonga_pt_defaults *power_tune_defaults; + SMU72_Discrete_MCRegisters mc_regs; + struct tonga_mc_reg_table mc_reg_table; + + uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; + }; extern int tonga_smum_init(struct pp_smumgr *smumgr); From fb044ed90c6f87dba95729a8deddcd860b16fec3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 11 Aug 2016 16:51:41 +0800 Subject: [PATCH 14/49] drm/amd/powerplay: use smu7_hwmgr to manager tonga. 
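Tonga now initializes through the common smu7 hwmgr and only layers chip-specific capability quirks on top via tonga_set_asic_special_caps(). As a rough, illustrative sketch (not part of this patch; enable_cac() is a placeholder name, while phm_cap_enabled() is the existing caps query already used elsewhere in powerplay), a cap toggled in that hook is expected to gate a feature later in the shared code paths:

	/* illustrative only: how a cap set in tonga_set_asic_special_caps()
	 * is meant to be consumed by the common smu7 code
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC))
		enable_cac(hwmgr);	/* placeholder for the real CAC enable path */
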
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4 +-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 32 ++++++++++++++++++-- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index dffcd8af881e..69e6d156a4c3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -4,12 +4,10 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ - cz_clockpowergating.o tonga_powertune.o\ + cz_clockpowergating.o pppcielanes.o\ process_pptables_v1_0.o ppatomctrl.o \ - tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ smu7_clockpowergating.o iceland_hwmgr.o \ - tonga_clockpowergating.o \ iceland_clockpowergating.o iceland_thermal.o \ iceland_powertune.o diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 449780cf140d..7ca8aaa88444 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -36,13 +36,13 @@ #include "amd_acpi.h" extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr); uint8_t convert_to_vid(uint16_t vddc) { @@ -82,9 +82,11 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) iceland_hwmgr_init(hwmgr); break; case CHIP_TONGA: - tonga_hwmgr_init(hwmgr); + smu7_hwmgr_init(hwmgr); + tonga_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | + PP_VBI_TIME_SUPPORT_MASK); break; - case CHIP_FIJI: smu7_hwmgr_init(hwmgr); fiji_set_asic_special_caps(hwmgr); @@ -768,3 +770,27 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) return 0; } +int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + return 0; +} From 1ff55f4651037821b1d8b028bb0541e2b404f044 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 19 Aug 2016 20:35:48 +0800 Subject: [PATCH 15/49] drm/amd/powerplay: implement smu7_smumgr for asics with smu ip version 7. 
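The new smu7_smumgr centralises SMC SRAM access, SMC message passing and firmware loading for all SMU7-class ASICs, so the per-ASIC smumgrs keep only their own table setup. A usage sketch, mirroring how the Tonga code later in this series reads a firmware header field through the shared helper (the SMU72 offsets and SMC_RAM_END limit shown here are the Tonga values; other ASICs pass their own):

	uint32_t tmp;

	if (!smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, DpmTable),
			&tmp, SMC_RAM_END))
		smu_data->smu7_data.dpm_table_start = tmp;
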
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 1 + drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- .../drm/amd/powerplay/smumgr/smu7_smumgr.c | 589 ++++++++++++++++++ .../drm/amd/powerplay/smumgr/smu7_smumgr.h | 87 +++ 4 files changed, 678 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index e7af6436c6c2..2139072065cc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -72,6 +72,7 @@ enum SMU_MEMBER { AverageGraphicsActivity, PreVBlankGap, VBlankTimeout, + UcodeLoadStatus, UvdBootLevel, VceBootLevel, SamuBootLevel, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 2ff4aa031b1a..a4d7462668d0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -3,7 +3,7 @@ # It provides the smu management services for the driver. SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o\ - polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o + polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o smu7_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c new file mode 100644 index 000000000000..6af744f42ec9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -0,0 +1,589 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + + +#include "smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "smu7_ppsmc.h" +#include "smu7_smumgr.h" + +#define SMU7_SMC_SIZE 0x20000 + +static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) +{ + PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); + PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ + return 0; +} + + +int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) +{ + uint32_t data; + uint32_t addr; + uint8_t *dest_byte; + uint8_t i, data_byte[4] = {0}; + uint32_t *pdata = (uint32_t *)&data_byte; + + PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL); + PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + addr = smc_start_address; + + while (byte_count >= 4) { + smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + + *dest = PP_SMC_TO_HOST_UL(data); + + dest += 1; + byte_count -= 4; + addr += 4; + } + + if (byte_count) { + smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + *pdata = PP_SMC_TO_HOST_UL(data); + /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ + dest_byte = (uint8_t *)dest; + for (i = 0; i < byte_count; i++) + dest_byte[i] = data_byte[i]; + } + + return 0; +} + + +int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + int result; + uint32_t data = 0; + uint32_t original_data; + uint32_t addr = 0; + uint32_t extra_shift; + + PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL); + PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + addr = smc_start_address; + + while (byte_count >= 4) { + /* Bytes are written into the SMC addres space with the MSB first. */ + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + + result = smu7_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + + data = 0; + + result = smu7_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + + original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* Bytes are written into the SMC addres space with the MSB first. 
*/ + data = (0x100 * data) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + result = smu7_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + } + + return 0; +} + + +int smu7_program_jump_on_start(struct pp_smumgr *smumgr) +{ + static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; + + smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); +} + +int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + int ret; + + if (!smu7_is_smc_ram_running(smumgr)) + return -EINVAL; + + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send pre message %x ret is %d \n", msg, ret); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send message %x ret is %d \n", msg, ret); + + return 0; +} + +int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) +{ + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + return 0; +} + +int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +{ + if (!smu7_is_smc_ram_running(smumgr)) { + return -EINVAL; + } + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return smu7_send_msg_to_smc(smumgr, msg); +} + +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +{ + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return smu7_send_msg_to_smc_without_waiting(smumgr, msg); +} + +int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr) +{ + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) + printk("Failed to send Message.\n"); + + return 0; +} + +int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr) +{ + if (!smu7_is_smc_ram_running(smumgr)) + return -EINVAL; + + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); + return 0; +} + + +enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SMU_SK: + result = CGS_UCODE_ID_SMU_SK; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + 
break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + + +int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) +{ + int result; + + result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + return 0; +} + +int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) +{ + int result; + + result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); + + return 0; +} + +/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */ + +static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) +{ + uint32_t result = 0; + + switch (fw_type) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + printk("UCode type is out of range! \n"); + result = 0; + } + + return result; +} + +static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, + uint32_t fw_type, + struct SMU_Entry *entry) +{ + int result = 0; + struct cgs_firmware_info info = {0}; + + result = cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(fw_type), + &info); + + if (!result) { + entry->version = info.version; + entry->id = (uint16_t)fw_type; + entry->image_addr_high = smu_upper_32_bits(info.mc_addr); + entry->image_addr_low = smu_lower_32_bits(info.mc_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = info.image_size; + entry->num_register_entries = 0; + } + + if (fw_type == UCODE_ID_RLC_G) + entry->flags = 1; + else + entry->flags = 0; + + return 0; +} + +int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) +{ + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + uint32_t fw_to_load; + int result = 0; + struct SMU_DRAMData_TOC *toc; + + if (!smumgr->reload_fw) { + printk(KERN_INFO "[ powerplay ] skip reloading...\n"); + return 0; + } + + if (smu_data->soft_regs_start) + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + smu_data->soft_regs_start + smum_get_offsetof(smumgr, + SMU_SoftRegisters, UcodeLoadStatus), + 0x0); + + if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK; + } else { + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK + + 
UCODE_ID_CP_MEC_JT1_MASK + + UCODE_ID_CP_MEC_JT2_MASK; + } + + toc = (struct SMU_DRAMData_TOC *)smu_data->header; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); + + if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) + printk(KERN_ERR "Fail to Request SMU Load uCode"); + + return result; +} + +/* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ +int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) +{ + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); + uint32_t ret; + + ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, + smu_data->soft_regs_start + smum_get_offsetof(smumgr, + SMU_SoftRegisters, UcodeLoadStatus), + fw_mask, fw_mask); + + return ret; +} + +int smu7_reload_firmware(struct pp_smumgr *smumgr) +{ + return smumgr->smumgr_funcs->start_smu(smumgr); +} + +static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) +{ + uint32_t byte_count = length; + + PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); + + for (; byte_count >= 4; byte_count -= 4) + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + + PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); + + return 0; +} + + +int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) +{ + int result = 0; + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + struct cgs_firmware_info info = {0}; + + if (smu_data->security_hard_key == 1) + cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + else + cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); + + result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); + + return result; +} + + +int smu7_init(struct pp_smumgr *smumgr) +{ + struct smu7_smumgr *smu_data; + uint8_t *internal_buf; + uint64_t mc_addr = 0; + + /* Allocate memory for backend private data */ + smu_data = (struct smu7_smumgr *)(smumgr->backend); + smu_data->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + smu_data->smu_buffer.data_size = 200*4096; + +/* Allocate FW image data structure and header buffer and + * send the header buffer address to SMU */ + smu_allocate_memory(smumgr->device, + smu_data->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &smu_data->header_buffer.kaddr, + &smu_data->header_buffer.handle); + + smu_data->header = smu_data->header_buffer.kaddr; + smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != smu_data->header), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)smu_data->header_buffer.handle); + return -EINVAL); + + smu_allocate_memory(smumgr->device, + smu_data->smu_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &smu_data->smu_buffer.kaddr, + &smu_data->smu_buffer.handle); + + internal_buf = smu_data->smu_buffer.kaddr; + smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != internal_buf), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)smu_data->smu_buffer.handle); + return -EINVAL); + + return 0; +} + + +int smu7_smu_fini(struct pp_smumgr 
*smumgr) +{ + if (smumgr->backend) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h new file mode 100644 index 000000000000..76352f2423ae --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -0,0 +1,87 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _SMU7_SMUMANAGER_H +#define _SMU7_SMUMANAGER_H + + +#include + +#define SMC_RAM_END 0x40000 +#define mmSMC_IND_INDEX_11 0x01AC +#define mmSMC_IND_DATA_11 0x01AD + +struct smu7_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +struct smu7_smumgr { + uint8_t *header; + uint8_t *mec_image; + struct smu7_buffer_entry smu_buffer; + struct smu7_buffer_entry header_buffer; + + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + uint32_t ulv_setting_starts; + uint8_t security_hard_key; + uint32_t acpi_optimization; +}; + + +int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + uint32_t *dest, uint32_t byte_count, uint32_t limit); +int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit); +int smu7_program_jump_on_start(struct pp_smumgr *smumgr); +bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr); +int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); +int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg); +int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, + uint32_t parameter); +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter); +int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr); +int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr); + +enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); +int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t *value, uint32_t limit); +int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t value, uint32_t limit); + +int smu7_request_smu_load_fw(struct pp_smumgr *smumgr); +int 
smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type); +int smu7_reload_firmware(struct pp_smumgr *smumgr); +int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr); +int smu7_init(struct pp_smumgr *smumgr); +int smu7_smu_fini(struct pp_smumgr *smumgr); + +#endif \ No newline at end of file From ac43f0800f46f6648b49231b54646034a4143145 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 19 Aug 2016 20:42:09 +0800 Subject: [PATCH 16/49] drm/amd/powerplay: use smu7 common functions and data on Tonga. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 201 +++--- .../drm/amd/powerplay/smumgr/tonga_smumgr.c | 657 +----------------- .../drm/amd/powerplay/smumgr/tonga_smumgr.h | 33 +- 3 files changed, 117 insertions(+), 774 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 3acdbffed88c..4dfd3f60a967 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -347,27 +347,27 @@ static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, int result; result = tonga_populate_smc_vddc_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "can not populate VDDC voltage table to SMC", return -EINVAL); result = tonga_populate_smc_vdd_ci_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "can not populate VDDCI voltage table to SMC", return -EINVAL); result = tonga_populate_smc_vdd_gfx_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "can not populate VDDGFX voltage table to SMC", return -EINVAL); result = tonga_populate_smc_mvdd_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "can not populate MVDD voltage table to SMC", return -EINVAL); result = tonga_populate_cac_tables(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "can not populate CAC voltage tables to SMC", return -EINVAL); @@ -542,7 +542,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = tonga_get_dependecy_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_sclk, engine_clock, &graphic_level->MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "can not find VDDC voltage value for VDDC " "engine clock dependency table", return result); @@ -574,7 +574,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - if (0 == result) { + if (!result) { /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); @@ -603,7 +603,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count; - uint32_t level_array_address = smu_data->dpm_table_start + + uint32_t level_array_address = smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) * @@ -690,7 +690,7 @@ int 
tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change*/ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -730,7 +730,7 @@ static int tonga_calculate_mclk_params( result = atomctrl_get_memory_pll_dividers_si(hwmgr, memory_clock, &mpll_param, strobe_mode); PP_ASSERT_WITH_CODE( - 0 == result, + !result, "Error retrieving Memory Clock Parameters from VBIOS.", return result); @@ -900,7 +900,7 @@ static int tonga_populate_single_memory_level( memory_clock, &memory_level->MinVoltage, &mvdd); PP_ASSERT_WITH_CODE( - 0 == result, + !result, "can not find MinVddc voltage value from memory VDDC " "voltage dependency table", return result); @@ -1008,7 +1008,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) /* populate MCLK dpm table to SMU7 */ uint32_t level_array_address = - smu_data->dpm_table_start + + smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel); uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * @@ -1048,11 +1048,10 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); - return result; } @@ -1257,7 +1256,7 @@ static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, table->UvdLevel[count].VclkFrequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "can not find divide id for Vclk clock", return result); @@ -1265,7 +1264,7 @@ static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, table->UvdLevel[count].DclkFrequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "can not find divide id for Dclk clock", return result); @@ -1314,7 +1313,7 @@ static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr, /* retrieve divider value for VBIOS */ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, table->VceLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "can not find divide id for VCE engine clock", return result); @@ -1359,7 +1358,7 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, /* retrieve divider value for VBIOS */ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, table->AcpLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "can not find divide id for engine clock", return result); table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; @@ -1404,7 +1403,7 @@ static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, /* retrieve divider value for VBIOS */ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "can not find divide id for samu clock", return result); table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; @@ -1474,10 +1473,10 @@ static int 
tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } } - if (0 == result) { - result = tonga_copy_bytes_to_smc( + if (!result) { + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->arb_table_start, + smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU72_Discrete_MCArbDramTimingTable), SMC_RAM_END @@ -1502,7 +1501,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, data->vbios_boot_state.sclk_bootup_value, (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); - if (0 != result) { + if (result != 0) { smu_data->smc_state_table.GraphicsBootLevel = 0; printk(KERN_ERR "[powerplay] VBIOS did not find boot engine " "clock value in dependency table. " @@ -1514,7 +1513,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, data->vbios_boot_state.mclk_bootup_value, (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); - if (0 != result) { + if (result != 0) { smu_data->smc_state_table.MemoryBootLevel = 0; printk(KERN_ERR "[powerplay] VBIOS did not find boot " "engine clock value in dependency table." @@ -1538,7 +1537,6 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, return result; } - static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, @@ -1591,6 +1589,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) /* Populate Stretch amount */ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ for (i = 0; i < sclk_table->count; i++) { smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= @@ -1798,17 +1797,17 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. 
*/ - result = tonga_read_smc_sram_dword(smumgr, - smu_data->arb_table_start, &tmp, SMC_RAM_END); + result = smu7_read_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); - if (0 != result) + if (result != 0) return result; tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return tonga_write_smc_sram_dword(smumgr, - smu_data->arb_table_start, tmp, SMC_RAM_END); + return smu7_write_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } @@ -1903,7 +1902,7 @@ static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (tonga_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, fuse_table_offset + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -1994,7 +1993,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (tonga_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -2052,7 +2051,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Hi and Lo Sidd Failed !", return -EINVAL); - if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -2173,10 +2172,10 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) return result; - address = smu_data->mc_reg_table_start + + address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); - return tonga_copy_bytes_to_smc( + return smu7_copy_bytes_to_smc( hwmgr->smumgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU72_Discrete_MCRegisterSet) * @@ -2192,16 +2191,16 @@ static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize MCRegTable for the MC register addresses !", return result;); result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize MCRegTable for driver state !", return result;); - return tonga_copy_bytes_to_smc(smumgr, smu_data->mc_reg_table_start, + return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END); } @@ -2268,7 +2267,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) if (data->ulv_supported && table_info->us_ulv_voltage_offset) { result = tonga_populate_ulv_state(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize ULV state !", return result;); @@ -2277,31 +2276,31 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) } result = tonga_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize Link Level !", return result); result = tonga_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == 
result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize Graphics Level !", return result); result = tonga_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize Memory Level !", return result); result = tonga_populate_smc_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize ACPI Level !", return result); result = tonga_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize VCE Level !", return result); result = tonga_populate_smc_acp_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize ACP Level !", return result); result = tonga_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize SAMU Level !", return result); /* Since only the initial state is completely set up at this @@ -2309,26 +2308,26 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) * need to populate the ARB settings for the initial state. */ result = tonga_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to Write ARB settings for the initial state.", return result;); result = tonga_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize UVD Level !", return result); result = tonga_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to initialize Boot Level !", return result); tonga_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to populate BAPM Parameters !", return result); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { result = tonga_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to populate Clock Stretcher Data Table !", return result;); } @@ -2367,7 +2366,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) table->PCIeGenInterval = 1; result = tonga_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to populate VRConfig setting !", return result); table->ThermGpio = 17; @@ -2448,26 +2447,26 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = tonga_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), + smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController), SMC_RAM_END); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to upload dpm data to SMC memory !", return result;); result = tonga_init_arb_table_index(hwmgr->smumgr); - PP_ASSERT_WITH_CODE(0 == result, + PP_ASSERT_WITH_CODE(!result, "Failed to upload arb data to SMC memory !", return result); tonga_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "Failed to populate initialize pm fuses !", return result); result = tonga_populate_initial_mc_reg_table(hwmgr); - 
PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "Failed to populate initialize MC Reg table !", return result); return 0; @@ -2498,7 +2497,7 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_MicrocodeFanControl)) return 0; - if (0 == smu_data->fan_table_start) { + if (0 == smu_data->smu7_data.fan_table_start) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); return 0; @@ -2558,19 +2557,12 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) fan_table.FanControl_GL_Flag = 1; - res = tonga_copy_bytes_to_smc(hwmgr->smumgr, smu_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); -/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. - if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ - hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.fan_table_start, + (uint8_t *)&fan_table, + (uint32_t)sizeof(fan_table), + SMC_RAM_END); - if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ - hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); - - if (0 != res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); -*/ return 0; } @@ -2606,9 +2598,9 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - result = tonga_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->dpm_table_start + + smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold), (uint8_t *)&low_sclk_interrupt_threshold, @@ -2618,7 +2610,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) result = tonga_update_and_upload_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), + PP_ASSERT_WITH_CODE((!result), "Failed to upload MC reg table !", return result); @@ -2645,6 +2637,8 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU72_SoftRegisters, PreVBlankGap); case VBlankTimeout: return offsetof(SMU72_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU72_SoftRegisters, UcodeLoadStatus); } case SMU_Discrete_DpmTable: switch (member) { @@ -2700,7 +2694,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) if (table_info->mm_dep_table->count > 0) smu_data->smc_state_table.UvdBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); mm_boot_level_offset /= 4; mm_boot_level_offset *= 4; @@ -2734,7 +2728,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.VceBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel); mm_boot_level_offset /= 4; mm_boot_level_offset *= 4; @@ -2759,7 +2753,7 @@ static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) 
uint32_t mm_boot_level_offset, mm_boot_level_value; smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); mm_boot_level_offset /= 4; @@ -2813,67 +2807,66 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = tonga_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); - if (0 == result) - smu_data->dpm_table_start = tmp; + if (!result) + smu_data->smu7_data.dpm_table_start = tmp; - error |= (0 != result); + error |= (result != 0); - result = tonga_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); - if (0 == result) { + if (!result) { data->soft_regs_start = tmp; - smu_data->soft_regs_start = tmp; + smu_data->smu7_data.soft_regs_start = tmp; } - error |= (0 != result); + error |= (result != 0); - result = tonga_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); - if (0 == result) - smu_data->mc_reg_table_start = tmp; + if (!result) + smu_data->smu7_data.mc_reg_table_start = tmp; - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, FanTable), &tmp, SMC_RAM_END); - if (0 == result) - smu_data->fan_table_start = tmp; + if (!result) + smu_data->smu7_data.fan_table_start = tmp; - error |= (0 != result); + error |= (result != 0); - result = tonga_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); - if (0 == result) - smu_data->arb_table_start = tmp; + if (!result) + smu_data->smu7_data.arb_table_start = tmp; - error |= (0 != result); + error |= (result != 0); - result = tonga_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, Version), &tmp, SMC_RAM_END); - if (0 == result) + if (!result) hwmgr->microcode_version_info.SMC = tmp; - error |= (0 != result); + error |= (result != 0); return error ? 
1 : 0; } @@ -3141,7 +3134,7 @@ int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); - if (NULL == table) + if (table == NULL) return -ENOMEM; /* Program additional LP registers that are no longer programmed by VBIOS */ @@ -3190,15 +3183,15 @@ int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); - if (0 == result) + if (!result) result = tonga_copy_vbios_smc_reg_table(table, ni_table); - if (0 == result) { + if (!result) { tonga_set_s0_mc_reg_index(ni_table); result = tonga_set_mc_special_registers(hwmgr, ni_table); } - if (0 == result) + if (!result) tonga_set_valid_flag(ni_table); kfree(table); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index b543d6c0f96a..5f9124046b9b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -34,587 +34,8 @@ #include "smu/smu_7_1_2_sh_mask.h" #include "cgs_common.h" #include "tonga_smc.h" +#include "smu7_smumgr.h" -#define TONGA_SMC_SIZE 0x20000 -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /*128 *1024*/ - -/** -* Set the address for reading/writing the SMC SRAM space. -* @param smumgr the address of the powerplay hardware manager. -* @param smcAddress the address in the SMC RAM to access. -*/ -static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t limit) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), - "SMC address must be 4 byte aligned.", - return -1;); - - PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), - "SMC address is beyond the SMC RAM area.", - return -1;); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); - - return 0; -} - -/** -* Copy bytes from an array into the SMC RAM space. -* -* @param smumgr the address of the powerplay SMU manager. -* @param smcStartAddress the start address in the SMC RAM to copy bytes to. -* @param src the byte array to copy the bytes from. -* @param byteCount the number of bytes to copy. 
-*/ -int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), - "SMC address must be 4 byte aligned.", - return 0;); - - PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), - "SMC address is beyond the SMC RAM area.", - return 0;); - - addr = smcStartAddress; - - while (byteCount >= 4) { - /* - * Bytes are written into the - * SMC address space with the MSB first - */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = tonga_set_smc_sram_address(smumgr, addr, limit); - - if (result) - goto out; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - - src += 4; - byteCount -= 4; - addr += 4; - } - - if (0 != byteCount) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = tonga_set_smc_sram_address(smumgr, addr, limit); - if (result) - goto out; - - orig_data = cgs_read_register(smumgr->device, - mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byteCount); - - while (byteCount > 0) { - data = (data << 8) + *src++; - byteCount--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = tonga_set_smc_sram_address(smumgr, addr, limit); - if (result) - goto out; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - } - -out: - return result; -} - - -int tonga_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; - - tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); - - return 0; -} - -/** -* Return if the SMC is currently running. -* -* @param smumgr the address of the powerplay hardware manager. -*/ -static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, - SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, - CGS_IND_REG__SMC, ixSMC_PC_C))); -} - -static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - return 0; -} - -/** -* Send a message to the SMC, and wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - if (!tonga_is_smc_ram_running(smumgr)) - return -1; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Previous Message.", - ); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Message.", - ); - - return 0; -} - -/* -* Send a message to the SMC, and do not wait for its response. 
-* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc_without_waiting - (struct pp_smumgr *smumgr, uint16_t msg) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Previous Message.", - ); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - -/* -* Send a message to the SMC with parameter -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - if (!tonga_is_smc_ram_running(smumgr)) - return PPSMC_Result_Failed; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc(smumgr, msg); -} - -/* -* Send a message to the SMC with parameter, do not wait for response -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc_without_waiting(smumgr, msg); -} - -/* - * Read a 32bit value from the SMC SRAM space. - * ALL PARAMETERS ARE IN HOST BYTE ORDER. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. - * @param value and output parameter for the data read from the SMC SRAM. - */ -int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t *value, - uint32_t limit) -{ - int result; - - result = tonga_set_smc_sram_address(smumgr, smcAddress, limit); - - if (0 != result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - - return 0; -} - -/* - * Write a 32bit value to the SMC SRAM space. - * ALL PARAMETERS ARE IN HOST BYTE ORDER. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. - * @param value to write to the SMC SRAM. 
- */ -int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t value, - uint32_t limit) -{ - int result; - - result = tonga_set_smc_sram_address(smumgr, smcAddress, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); - - return 0; -} - -static int tonga_smu_fini(struct pp_smumgr *smumgr) -{ - struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); - - smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); - smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); - - if (smumgr->backend != NULL) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} - -static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} - -/** - * Convert the PPIRI firmware type to SMU type mask. - * For MEC, we need to check all MEC related type -*/ -static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType) -{ - uint16_t result = 0; - - switch (firmwareType) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - break; - } - - return result; -} - -/** - * Check if the FW has been loaded, - * SMU will not return if loading has not finished. 
-*/ -static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType) -{ - uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType); - - if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, - SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) { - printk(KERN_ERR "[ powerplay ] check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -/* Populate one firmware image to the data structure */ -static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint16_t firmware_type, - struct SMU_Entry *pentry) -{ - int result; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info( - smumgr->device, - tonga_convert_fw_type_to_cgs(firmware_type), - &info); - - if (result == 0) { - pentry->version = 0; - pentry->id = (uint16_t)firmware_type; - pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); - pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); - pentry->meta_data_addr_high = 0; - pentry->meta_data_addr_low = 0; - pentry->data_size_byte = info.image_size; - pentry->num_register_entries = 0; - - if (firmware_type == UCODE_ID_RLC_G) - pentry->flags = 1; - else - pentry->flags = 0; - } else { - return result; - } - - return result; -} - -static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) -{ - struct tonga_smumgr *tonga_smu = - (struct tonga_smumgr *)(smumgr->backend); - uint16_t fw_to_load; - struct SMU_DRAMData_TOC *toc; - /** - * First time this gets called during SmuMgr init, - * we haven't processed SMU header file yet, - * so Soft Register Start offset is unknown. - * However, for this case, UcodeLoadStatus is already 0, - * so we can skip this if the Soft Registers Start offset is 0. - */ - cgs_write_ind_register(smumgr->device, - CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0); - - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_SMU_DRAM_ADDR_HI, - tonga_smu->smu_buffer.mc_addr_high); - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_SMU_DRAM_ADDR_LO, - tonga_smu->smu_buffer.mc_addr_low); - - toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry(smumgr, - UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", - return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", - return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware 
Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DRV_DRAM_ADDR_HI, - tonga_smu->header_buffer.mc_addr_high); - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DRV_DRAM_ADDR_LO, - tonga_smu->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK; - - PP_ASSERT_WITH_CODE( - 0 == tonga_send_msg_to_smc_with_parameter_without_waiting( - smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), - "Fail to Request SMU Load uCode", return 0); - - return 0; -} - -static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, - uint32_t firmwareType) -{ - return 0; -} - -/** - * Upload the SMC firmware to the SMC microcontroller. - * - * @param smumgr the address of the powerplay hardware manager. - * @param pFirmware the data structure containing the various sections of the firmware. - */ -static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr) -{ - const uint8_t *src; - uint32_t byte_count; - uint32_t *data; - struct cgs_firmware_info info = {0}; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - cgs_get_firmware_info(smumgr->device, - tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); - - if (info.image_size & 3) { - printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (info.image_size > TONGA_SMC_SIZE) { - printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - - byte_count = info.image_size; - src = (const uint8_t *)info.kptr; - - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - - return 0; -} static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) { @@ -624,7 +45,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = tonga_smu_upload_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result) return result; @@ -654,7 +75,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) /** * Call Test SMU message with 0x20000 offset to trigger SMU start */ - tonga_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(smumgr); /* Wait for done bit to be set */ SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, @@ -691,13 +112,13 @@ static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = tonga_smu_upload_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - tonga_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, @@ -719,7 +140,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) int result; /* Only 
start SMC if SMC RAM is not running */ - if (!tonga_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(smumgr)) { /*Check if SMU is running in protected mode*/ if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { @@ -733,7 +154,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) } } - result = tonga_request_smu_reload_fw(smumgr); + result = smu7_request_smu_load_fw(smumgr); return result; } @@ -747,70 +168,28 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) */ static int tonga_smu_init(struct pp_smumgr *smumgr) { - struct tonga_smumgr *tonga_smu; - uint8_t *internal_buf; - uint64_t mc_addr = 0; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + int i; - /* Allocate memory for backend private data */ - tonga_smu = (struct tonga_smumgr *)(smumgr->backend); - tonga_smu->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - tonga_smu->smu_buffer.data_size = 200*4096; - - smu_allocate_memory(smumgr->device, - tonga_smu->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &tonga_smu->header_buffer.kaddr, - &tonga_smu->header_buffer.handle); - - tonga_smu->pHeader = tonga_smu->header_buffer.kaddr; - tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)tonga_smu->header_buffer.handle); - return -1); - - smu_allocate_memory(smumgr->device, - tonga_smu->smu_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &tonga_smu->smu_buffer.kaddr, - &tonga_smu->smu_buffer.handle); - - internal_buf = tonga_smu->smu_buffer.kaddr; - tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != internal_buf), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)tonga_smu->smu_buffer.handle); - return -1;); + if (smu7_init(smumgr)) + return -EINVAL; for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) - tonga_smu->activity_target[i] = 30; + smu_data->activity_target[i] = 30; return 0; } static const struct pp_smumgr_func tonga_smu_funcs = { .smu_init = &tonga_smu_init, - .smu_fini = &tonga_smu_fini, + .smu_fini = &smu7_smu_fini, .start_smu = &tonga_start_smu, - .check_fw_load_finish = &tonga_check_fw_load_finish, - .request_smu_load_fw = &tonga_request_smu_reload_fw, - .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw, - .send_msg_to_smc = &tonga_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter, + .check_fw_load_finish = &smu7_check_fw_load_finish, + .request_smu_load_fw = &smu7_request_smu_load_fw, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .update_smc_table = tonga_update_smc_table, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h index b2ad232506c1..edb5f203f7f5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h @@ -26,16 +26,7 @@ #include "smu72_discrete.h" -#define 
SMC_RAM_END 0x40000 - -struct tonga_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; -}; - +#include "smu7_smumgr.h" struct tonga_mc_reg_entry { uint32_t mclk_max; @@ -52,19 +43,8 @@ struct tonga_mc_reg_table { struct tonga_smumgr { - uint8_t *pHeader; - uint8_t *pMecImage; - - - uint32_t soft_regs_start; - uint32_t dpm_table_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; - - struct tonga_buffer_entry header_buffer; - struct tonga_buffer_entry smu_buffer; + struct smu7_smumgr smu7_data; struct SMU72_Discrete_DpmTable smc_state_table; struct SMU72_Discrete_Ulv ulv_setting; struct SMU72_Discrete_PmFuses power_tune_table; @@ -76,13 +56,4 @@ struct tonga_smumgr { }; -extern int tonga_smum_init(struct pp_smumgr *smumgr); -extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit); -extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, - uint32_t *value, uint32_t limit); -extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, - uint32_t value, uint32_t limit); - #endif From d01ec3fb8c9bff2f82db075027f56b3390a3c77e Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 19 Aug 2016 20:43:52 +0800 Subject: [PATCH 17/49] drm/amd/powerplay: use smu7 common functions and data on Polars10. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../amd/powerplay/inc/polaris10_pwrvirus.h | 3 +- .../drm/amd/powerplay/smumgr/polaris10_smc.c | 81 ++- .../amd/powerplay/smumgr/polaris10_smumgr.c | 681 +----------------- .../amd/powerplay/smumgr/polaris10_smumgr.h | 34 +- 4 files changed, 84 insertions(+), 715 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h index f497e7d98e6d..0de443612312 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h @@ -23,8 +23,7 @@ #ifndef _POLARIS10_PWRVIRUS_H #define _POLARIS10_PWRVIRUS_H -#define mmSMC_IND_INDEX_11 0x01AC -#define mmSMC_IND_DATA_11 0x01AD + #define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a #define mmCP_HYP_MEC1_UCODE_DATA 0xf81b #define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 8ed98b708c55..4ccc0b72324d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -47,6 +47,7 @@ #include "dce/dce_10_0_sh_mask.h" #include "polaris10_pwrvirus.h" #include "smu7_ppsmc.h" +#include "smu7_smumgr.h" #define POLARIS10_SMC_SIZE 0x20000 #define VOLTAGE_VID_OFFSET_SCALE1 625 @@ -230,7 +231,7 @@ static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_of const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (polaris10_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, fuse_table_offset + offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -319,7 +320,7 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (polaris10_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, 
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -367,7 +368,7 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -755,7 +756,7 @@ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count; int result = 0; - uint32_t array = smu_data->dpm_table_start + + uint32_t array = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * SMU74_MAX_LEVELS_GRAPHICS; @@ -833,7 +834,7 @@ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = polaris10_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -901,7 +902,7 @@ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ - uint32_t array = smu_data->dpm_table_start + + uint32_t array = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) * SMU74_MAX_LEVELS_MEMORY; @@ -938,7 +939,7 @@ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); /* level count will send to smc once at init smc table and never change */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1216,9 +1217,9 @@ static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } } - result = polaris10_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->arb_table_start, + smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU74_Discrete_MCArbDramTimingTable), SMC_RAM_END); @@ -1463,7 +1464,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { config = VR_SVI2_PLANE_2; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->soft_regs_start + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start + offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); } else { config = VR_STATIC_VOLTAGE; @@ -1529,20 +1530,20 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); } - result = polaris10_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), &tmp, SMC_RAM_END); - 
polaris10_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(smumgr, tmp, (uint8_t *)&AVFS_meanNsigma, sizeof(AVFS_meanNsigma_t), SMC_RAM_END); - result = polaris10_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), &tmp, SMC_RAM_END); - polaris10_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(smumgr, tmp, (uint8_t *)&AVFS_SclkOffset, sizeof(AVFS_Sclk_Offset_t), @@ -1578,8 +1579,8 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = polaris10_read_smc_sram_dword(smumgr, - smu_data->arb_table_start, &tmp, SMC_RAM_END); + result = smu7_read_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) return result; @@ -1587,8 +1588,8 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return polaris10_write_smc_sram_dword(smumgr, - smu_data->arb_table_start, tmp, SMC_RAM_END); + return smu7_write_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) @@ -1811,8 +1812,8 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, - smu_data->dpm_table_start + + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController), @@ -1884,7 +1885,7 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int res; uint64_t tmp64; - if (smu_data->fan_table_start == 0) { + if (smu_data->smu7_data.fan_table_start == 0) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); return 0; @@ -1950,7 +1951,7 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, smu_data->fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); @@ -1986,7 +1987,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) if (table_info->mm_dep_table->count > 0) smu_data->smc_state_table.UvdBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = smu_data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); mm_boot_level_offset /= 4; mm_boot_level_offset *= 4; @@ -2021,7 +2022,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) else smu_data->smc_state_table.VceBootLevel = 0; - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VceBootLevel); mm_boot_level_offset /= 4; mm_boot_level_offset *= 4; @@ -2046,7 +2047,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = 
smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); mm_boot_level_offset /= 4; @@ -2123,9 +2124,9 @@ int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - result = polaris10_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->dpm_table_start + + smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold), (uint8_t *)&low_sclk_interrupt_threshold, @@ -2158,6 +2159,8 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU74_SoftRegisters, PreVBlankGap); case VBlankTimeout: return offsetof(SMU74_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU74_SoftRegisters, UcodeLoadStatus); } case SMU_Discrete_DpmTable: switch (member) { @@ -2215,55 +2218,55 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); if (0 == result) - smu_data->dpm_table_start = tmp; + smu_data->smu7_data.dpm_table_start = tmp; error |= (0 != result); - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); if (!result) - smu_data->soft_regs_start = tmp; + smu_data->smu7_data.soft_regs_start = tmp; error |= (0 != result); - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); if (!result) - smu_data->mc_reg_table_start = tmp; + smu_data->smu7_data.mc_reg_table_start = tmp; - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, FanTable), &tmp, SMC_RAM_END); if (!result) - smu_data->fan_table_start = tmp; + smu_data->smu7_data.fan_table_start = tmp; error |= (0 != result); - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); if (!result) - smu_data->arb_table_start = tmp; + smu_data->smu7_data.arb_table_start = tmp; error |= (0 != result); - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2281,4 +2284,4 @@ bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) ? 
true : false; -} \ No newline at end of file +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index b13ea4218415..5c3598ab7dae 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -40,13 +40,7 @@ #include "cgs_common.h" #include "polaris10_smc.h" #include "smu7_ppsmc.h" - -#define POLARIS10_SMC_SIZE 0x20000 - -/* Microcode file is stored in this buffer */ -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /* 128 *1024 */ +#include "smu7_smumgr.h" #define PPPOLARIS10_TARGETACTIVITY_DFLT 50 @@ -66,569 +60,6 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; -/** -* Set the address for reading/writing the SMC SRAM space. -* @param smumgr the address of the powerplay hardware manager. -* @param smcAddress the address in the SMC RAM to access. -*/ -static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) -{ - PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); - PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); - - return 0; -} - -/** -* Copy bytes from SMC RAM space into driver memory. -* -* @param smumgr the address of the powerplay SMU manager. -* @param smc_start_address the start address in the SMC RAM to copy bytes from -* @param src the byte array to copy the bytes to. -* @param byte_count the number of bytes to copy. -*/ -int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) -{ - uint32_t data; - uint32_t addr; - uint8_t *dest_byte; - uint8_t i, data_byte[4] = {0}; - uint32_t *pdata = (uint32_t *)&data_byte; - - PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;); - PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1); - - addr = smc_start_address; - - while (byte_count >= 4) { - polaris10_read_smc_sram_dword(smumgr, addr, &data, limit); - - *dest = PP_SMC_TO_HOST_UL(data); - - dest += 1; - byte_count -= 4; - addr += 4; - } - - if (byte_count) { - polaris10_read_smc_sram_dword(smumgr, addr, &data, limit); - *pdata = PP_SMC_TO_HOST_UL(data); - /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ - dest_byte = (uint8_t *)dest; - for (i = 0; i < byte_count; i++) - dest_byte[i] = data_byte[i]; - } - - return 0; -} - -/** -* Copy bytes from an array into the SMC RAM space. -* -* @param pSmuMgr the address of the powerplay SMU manager. -* @param smc_start_address the start address in the SMC RAM to copy bytes to. -* @param src the byte array to copy the bytes from. -* @param byte_count the number of bytes to copy. 
-*/ -int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, - const uint8_t *src, uint32_t byte_count, uint32_t limit) -{ - int result; - uint32_t data = 0; - uint32_t original_data; - uint32_t addr = 0; - uint32_t extra_shift; - - PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1); - PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1); - - addr = smc_start_address; - - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first. */ - data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - - result = polaris10_set_smc_sram_address(smumgr, addr, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - - data = 0; - - result = polaris10_set_smc_sram_address(smumgr, addr, limit); - - if (0 != result) - return result; - - - original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); - - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - /* Bytes are written into the SMC addres space with the MSB first. */ - data = (0x100 * data) + *src++; - byte_count--; - } - - data <<= extra_shift; - - data |= (original_data & ~((~0UL) << extra_shift)); - - result = polaris10_set_smc_sram_address(smumgr, addr, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); - } - - return 0; -} - - -static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; - - polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -/** -* Return if the SMC is currently running. -* -* @param smumgr the address of the powerplay hardware manager. -*/ -bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); -} - -static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) -{ - uint32_t efuse; - - efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); - efuse &= 0x00000001; - if (efuse) - return true; - - return false; -} - -/** -* Send a message to the SMC, and wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - int ret; - - if (!polaris10_is_smc_ram_running(smumgr)) - return -1; - - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); - - if (ret != 1) - printk("\n failed to send pre message %x ret is %d \n", msg, ret); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); - - if (ret != 1) - printk("\n failed to send message %x ret is %d \n", msg, ret); - - return 0; -} - - -/** -* Send a message to the SMC, and do not wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return Always return 0. 
-*/ -int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) -{ - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - -/** -* Send a message to the SMC with parameter -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) -{ - if (!polaris10_is_smc_ram_running(smumgr)) { - return -1; - } - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return polaris10_send_msg_to_smc(smumgr, msg); -} - - -/** -* Send a message to the SMC with parameter, do not wait for response -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) -{ - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return polaris10_send_msg_to_smc_without_waiting(smumgr, msg); -} - -int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr) -{ - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Message.\n"); - - return 0; -} - -/** -* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr) -{ - /* If the SMC is not even on it qualifies as inactive. */ - if (!polaris10_is_smc_ram_running(smumgr)) - return -1; - - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); - return 0; -} - - -/** -* Upload the SMC firmware to the SMC microcontroller. -* -* @param smumgr the address of the powerplay hardware manager. -* @param pFirmware the data structure containing the various sections of the firmware. 
-*/ -static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) -{ - uint32_t byte_count = length; - - PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); - - for (; byte_count >= 4; byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); - - PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1); - - return 0; -} - -static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SMU_SK: - result = CGS_UCODE_ID_SMU_SK; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} - -static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr) -{ - int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - - struct cgs_firmware_info info = {0}; - - if (smu_data->security_hard_key == 1) - cgs_get_firmware_info(smumgr->device, - polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); - else - cgs_get_firmware_info(smumgr->device, - polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); - - /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/ - result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE); - - return result; -} - -/** -* Read a 32bit value from the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smcAddress the address in the SMC RAM to access. -* @param value and output parameter for the data read from the SMC SRAM. -*/ -int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) -{ - int result; - - result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); - return 0; -} - -/** -* Write a 32bit value to the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -* @param value to write to the SMC SRAM. 
-*/ -int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) -{ - int result; - - result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); - - return 0; -} - - -int polaris10_smu_fini(struct pp_smumgr *smumgr) -{ - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} - -/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */ -static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type) -{ - uint32_t result = 0; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - printk("UCode type is out of range! \n"); - result = 0; - } - - return result; -} - -/* Populate one firmware image to the data structure */ - -static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - int result = 0; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info(smumgr->device, - polaris10_convert_fw_type_to_cgs(fw_type), - &info); - - if (!result) { - entry->version = info.version; - entry->id = (uint16_t)fw_type; - entry->image_addr_high = smu_upper_32_bits(info.mc_addr); - entry->image_addr_low = smu_lower_32_bits(info.mc_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = info.image_size; - entry->num_register_entries = 0; - } - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - - return 0; -} - -static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr) -{ - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - uint32_t fw_to_load; - - int result = 0; - struct SMU_DRAMData_TOC *toc; - - if (!smumgr->reload_fw) { - printk(KERN_INFO "[ powerplay ] skip reloading...\n"); - return 0; - } - - if (smu_data->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus), - 0x0); - - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); - - toc = (struct SMU_DRAMData_TOC *)smu_data->header; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == 
polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK; - - if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); - - return result; -} - -/* Check if the FW has been loaded, SMU will not return if loading has not finished. */ -static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) -{ - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type); - uint32_t ret; - /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */ - ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, - smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus), - fw_mask, fw_mask); - - return ret; -} - -static int polaris10_reload_firmware(struct pp_smumgr *smumgr) -{ - return smumgr->smumgr_funcs->start_smu(smumgr); -} static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) { @@ -670,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } @@ -698,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_size = sizeof(avfs_graphics_level_polaris10); u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE); - PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &dpm_table_start, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table", @@ -709,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_address = dpm_table_start + 
offsetof(SMU74_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address, (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC", return -1); graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, (uint8_t *)(&avfs_graphics_level_polaris10), graphics_level_size, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!", @@ -724,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!", return -1); @@ -733,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!", return -1); @@ -794,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = polaris10_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result != 0) return result; @@ -813,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) /* Call Test SMU message with 0x20000 offset to trigger SMU start */ - polaris10_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(smumgr); /* Wait done bit to be set */ /* Check pass/failed indicator */ @@ -854,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = polaris10_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - polaris10_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); @@ -882,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) bool SMU_VFT_INTACT; /* Only start SMC if SMC RAM is not running */ - if (!polaris10_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(smumgr)) { SMU_VFT_INTACT = false; smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); - smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, 
CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ if (smu_data->protected_mode == 0) { @@ -895,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) /* If failed, try with different security Key. */ if (result != 0) { - smu_data->security_hard_key ^= 1; + smu_data->smu7_data.security_hard_key ^= 1; result = polaris10_start_smu_in_protection_mode(smumgr); } } @@ -907,71 +338,35 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) } else SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */ - smu_data->post_initial_boot = true; polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT); /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), - &(smu_data->soft_regs_start), 0x40000); + smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), + &(smu_data->smu7_data.soft_regs_start), 0x40000); - result = polaris10_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(smumgr); return result; } +static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + uint32_t efuse; + + efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse &= 0x00000001; + if (efuse) + return true; + + return false; +} + static int polaris10_smu_init(struct pp_smumgr *smumgr) { - struct polaris10_smumgr *smu_data; - uint8_t *internal_buf; - uint64_t mc_addr = 0; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); int i; - /* Allocate memory for backend private data */ - smu_data = (struct polaris10_smumgr *)(smumgr->backend); - smu_data->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - smu_data->smu_buffer.data_size = 200*4096; - smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; -/* Allocate FW image data structure and header buffer and - * send the header buffer address to SMU */ - smu_allocate_memory(smumgr->device, - smu_data->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &smu_data->header_buffer.kaddr, - &smu_data->header_buffer.handle); - - smu_data->header = smu_data->header_buffer.kaddr; - smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != smu_data->header), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)smu_data->header_buffer.handle); - return -1); - -/* Allocate buffer for SMU internal buffer and send the address to SMU. 
- * Iceland SMU does not need internal buffer.*/ - smu_allocate_memory(smumgr->device, - smu_data->smu_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &smu_data->smu_buffer.kaddr, - &smu_data->smu_buffer.handle); - - internal_buf = smu_data->smu_buffer.kaddr; - smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != internal_buf), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)smu_data->smu_buffer.handle); - return -1;); + if (smu7_init(smumgr)) + return -EINVAL; if (polaris10_is_hw_avfs_present(smumgr)) smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; @@ -986,13 +381,13 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) static const struct pp_smumgr_func polaris10_smu_funcs = { .smu_init = polaris10_smu_init, - .smu_fini = polaris10_smu_fini, + .smu_fini = smu7_smu_fini, .start_smu = polaris10_start_smu, - .check_fw_load_finish = polaris10_check_fw_load_finish, - .request_smu_load_fw = polaris10_reload_firmware, + .check_fw_load_finish = smu7_check_fw_load_finish, + .request_smu_load_fw = smu7_reload_firmware, .request_smu_load_specific_fw = NULL, - .send_msg_to_smc = polaris10_send_msg_to_smc, - .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter, + .send_msg_to_smc = smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .update_smc_table = polaris10_update_smc_table, @@ -1015,7 +410,7 @@ int polaris10_smum_init(struct pp_smumgr *smumgr) polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL); if (polaris10_smu == NULL) - return -1; + return -EINVAL; smumgr->backend = polaris10_smu; smumgr->smumgr_funcs = &polaris10_smu_funcs; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h index 1ab9b1d9df9a..49ebf1d5a53c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h @@ -28,7 +28,7 @@ #include #include "smu74.h" #include "smu74_discrete.h" - +#include "smu7_smumgr.h" #define SMC_RAM_END 0x40000 @@ -51,13 +51,7 @@ struct polaris10_pt_defaults { uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; }; -struct polaris10_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; -}; + struct polaris10_range_table { uint32_t trans_lower_frequency; /* in 10khz */ @@ -65,24 +59,8 @@ struct polaris10_range_table { }; struct polaris10_smumgr { - uint8_t *header; - uint8_t *mec_image; - struct polaris10_buffer_entry smu_buffer; - struct polaris10_buffer_entry header_buffer; - - uint32_t soft_regs_start; - uint32_t dpm_table_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; - - uint8_t *read_rrm_straps; - uint32_t read_drm_straps_mc_address_high; - uint32_t read_drm_straps_mc_address_low; - uint32_t acpi_optimization; - bool post_initial_boot; + struct smu7_smumgr smu7_data; uint8_t protected_mode; - uint8_t security_hard_key; struct polaris10_avfs avfs; SMU74_Discrete_DpmTable smc_state_table; struct SMU74_Discrete_Ulv ulv_setting; @@ -94,10 +72,4 @@ struct polaris10_smumgr { }; -int polaris10_smum_init(struct pp_smumgr *smumgr); -int polaris10_read_smc_sram_dword(struct pp_smumgr 
*smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit); -int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit); -int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, - const uint8_t *src, uint32_t byte_count, uint32_t limit); - #endif From 5746f90c56ebc9aeca10ee0296f1bc426a4bb2e1 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 25 Aug 2016 20:25:40 +0800 Subject: [PATCH 18/49] drm/amd/powerplay: use smu7 common functions and data on Fiji. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 69 +- .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 605 +----------------- .../drm/amd/powerplay/smumgr/fiji_smumgr.h | 29 +- 3 files changed, 68 insertions(+), 635 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index fd0c00173cce..76310ac7ef0d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -42,6 +42,7 @@ #include "bif/bif_5_0_sh_mask.h" #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" +#include "smu7_smumgr.h" #define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 @@ -337,7 +338,7 @@ static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (fiji_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, fuse_table_offset + offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -429,7 +430,7 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (fiji_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -483,7 +484,7 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -781,7 +782,7 @@ int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; int result = 0; - uint32_t array = smu_data->dpm_table_start + + uint32_t array = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * SMU73_MAX_LEVELS_GRAPHICS; @@ -858,7 +859,7 @@ int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1003,7 +1004,7 @@ int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; /* populate MCLK dpm 
table to SMU7 */ - uint32_t array = smu_data->dpm_table_start + + uint32_t array = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, MemoryLevel); uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * SMU73_MAX_LEVELS_MEMORY; @@ -1042,7 +1043,7 @@ int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1368,9 +1369,9 @@ static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } if (!result) - result = fiji_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->arb_table_start, + smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU73_Discrete_MCArbDramTimingTable), SMC_RAM_END); @@ -1707,8 +1708,8 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = fiji_read_smc_sram_dword(smumgr, - smu_data->arb_table_start, &tmp, SMC_RAM_END); + result = smu7_read_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) return result; @@ -1716,8 +1717,8 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return fiji_write_smc_sram_dword(smumgr, - smu_data->arb_table_start, tmp, SMC_RAM_END); + return smu7_write_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } /** @@ -1917,8 +1918,8 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, - smu_data->dpm_table_start + + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), @@ -1957,7 +1958,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int res; uint64_t tmp64; - if (smu_data->fan_table_start == 0) { + if (smu_data->smu7_data.fan_table_start == 0) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); return 0; @@ -2023,7 +2024,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = fiji_copy_bytes_to_smc(hwmgr->smumgr, smu_data->fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); @@ -2078,9 +2079,9 @@ int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - result = fiji_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, - smu_data->dpm_table_start + + smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold), (uint8_t *)&low_sclk_interrupt_threshold, @@ -2109,6 +2110,8 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU73_SoftRegisters, PreVBlankGap); case VBlankTimeout: return offsetof(SMU73_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return 
offsetof(SMU73_SoftRegisters, UcodeLoadStatus); } case SMU_Discrete_DpmTable: switch (member) { @@ -2163,7 +2166,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) if (table_info->mm_dep_table->count > 0) smu_data->smc_state_table.UvdBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = smu_data->dpm_table_start + offsetof(SMU73_Discrete_DpmTable, + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); mm_boot_level_offset /= 4; mm_boot_level_offset *= 4; @@ -2198,7 +2201,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) else smu_data->smc_state_table.VceBootLevel = 0; - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, VceBootLevel); mm_boot_level_offset /= 4; mm_boot_level_offset *= 4; @@ -2223,7 +2226,7 @@ static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->dpm_table_start + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); mm_boot_level_offset /= 4; @@ -2276,57 +2279,57 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = fiji_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); if (0 == result) - smu_data->dpm_table_start = tmp; + smu_data->smu7_data.dpm_table_start = tmp; error |= (0 != result); - result = fiji_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); if (!result) { data->soft_regs_start = tmp; - smu_data->soft_regs_start = tmp; + smu_data->smu7_data.soft_regs_start = tmp; } error |= (0 != result); - result = fiji_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); if (!result) - smu_data->mc_reg_table_start = tmp; + smu_data->smu7_data.mc_reg_table_start = tmp; - result = fiji_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, FanTable), &tmp, SMC_RAM_END); if (!result) - smu_data->fan_table_start = tmp; + smu_data->smu7_data.fan_table_start = tmp; error |= (0 != result); - result = fiji_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); if (!result) - smu_data->arb_table_start = tmp; + smu_data->smu7_data.arb_table_start = tmp; error |= (0 != result); - result = fiji_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, Version), &tmp, SMC_RAM_END); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 82a8be4af63b..02fe1df855a9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -58,520 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { { 0xf811d047, 0x80380100, 
0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } }; -static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} -/** -* Set the address for reading/writing the SMC SRAM space. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -*/ -static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr, - uint32_t smc_addr, uint32_t limit) -{ - PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), - "SMC address must be 4 byte aligned.", return -EINVAL;); - PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), - "SMC address is beyond the SMC RAM area.", return -EINVAL;); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - - return 0; -} - -/** -* Copy bytes from an array into the SMC RAM space. -* -* @param smumgr the address of the powerplay SMU manager. -* @param smcStartAddress the start address in the SMC RAM to copy bytes to. -* @param src the byte array to copy the bytes from. -* @param byteCount the number of bytes to copy. -*/ -int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit) -{ - int result; - uint32_t data, originalData; - uint32_t addr, extraShift; - - PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), - "SMC address must be 4 byte aligned.", return -EINVAL;); - PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), - "SMC address is beyond the SMC RAM area.", return -EINVAL;); - - addr = smcStartAddress; - - while (byteCount >= 4) { - /* Bytes are written into the SMC addres space with the MSB first. */ - data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - - result = fiji_set_smc_sram_address(smumgr, addr, limit); - if (result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - - src += 4; - byteCount -= 4; - addr += 4; - } - - if (byteCount) { - /* Now write the odd bytes left. - * Do a read modify write cycle. - */ - data = 0; - - result = fiji_set_smc_sram_address(smumgr, addr, limit); - if (result) - return result; - - originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - extraShift = 8 * (4 - byteCount); - - while (byteCount > 0) { - /* Bytes are written into the SMC addres - * space with the MSB first. 
- */ - data = (0x100 * data) + *src++; - byteCount--; - } - data <<= extraShift; - data |= (originalData & ~((~0UL) << extraShift)); - - result = fiji_set_smc_sram_address(smumgr, addr, limit); - if (!result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - } - return 0; -} - -int fiji_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 }; - - fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1); - - return 0; -} - -/** -* Return if the SMC is currently running. -* -* @param smumgr the address of the powerplay hardware manager. -*/ -bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, - CGS_IND_REG__SMC, - SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, - CGS_IND_REG__SMC, ixSMC_PC_C))); -} - -/** -* Send a message to the SMC, and wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - int ret; - - if (!fiji_is_smc_ram_running(smumgr)) - return -1; - - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); - - if (ret != 1) - printk("\n failed to send pre message %x ret is %d \n", msg, ret); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); - - if (ret != 1) - printk("\n failed to send message %x ret is %d \n", msg, ret); - - return 0; -} - -/** - * Send a message to the SMC with parameter - * @param smumgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return The response that came from the SMC. - */ -int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (!fiji_is_smc_ram_running(smumgr)) - return -1; - - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) { - printk(KERN_ERR "Failed to send Previous Message."); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - } - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - return 0; -} - - -/** -* Send a message to the SMC with parameter, do not wait for response -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -int fiji_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) -{ - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) { - printk(KERN_ERR "Failed to send Previous Message."); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - } - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - -/** -* Uploads the SMU firmware from .hex file -* -* @param smumgr the address of the powerplay SMU manager. -* @return 0 or -1. 
-*/ - -static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr) -{ - const uint8_t *src; - uint32_t byte_count; - uint32_t *data; - struct cgs_firmware_info info = {0}; - - cgs_get_firmware_info(smumgr->device, - fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); - - if (info.image_size & 3) { - printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (info.image_size > FIJI_SMC_SIZE) { - printk(KERN_ERR "SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - - byte_count = info.image_size; - src = (const uint8_t *)info.kptr; - - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - return 0; -} - -/** -* Read a 32bit value from the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -* @param value and output parameter for the data read from the SMC SRAM. -*/ -int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, - uint32_t *value, uint32_t limit) -{ - int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - return 0; -} - -/** -* Write a 32bit value to the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -* @param value to write to the SMC SRAM. 
-*/ -int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, - uint32_t value, uint32_t limit) -{ - int result; - - result = fiji_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); - return 0; -} - -static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type) -{ - uint32_t result = 0; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC_JT1: - result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; - break; - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - printk(KERN_ERR "UCode type is out of range!"); - result = 0; - } - - return result; -} - -/* Populate one firmware image to the data structure */ -static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint32_t fw_type, struct SMU_Entry *entry) -{ - int result; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info( - smumgr->device, - fiji_convert_fw_type_to_cgs(fw_type), - &info); - - if (!result) { - entry->version = 0; - entry->id = (uint16_t)fw_type; - entry->image_addr_high = smu_upper_32_bits(info.mc_addr); - entry->image_addr_low = smu_lower_32_bits(info.mc_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = info.image_size; - entry->num_register_entries = 0; - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - } - - return result; -} - -static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - uint32_t fw_to_load; - struct SMU_DRAMData_TOC *toc; - - if (priv->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - priv->soft_regs_start + - offsetof(SMU73_SoftRegisters, UcodeLoadStatus), - 0x0); - - toc = (struct SMU_DRAMData_TOC *)priv->header; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_MEC_JT2, 
&toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - - fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, - priv->header_buffer.mc_addr_high); - fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO, - priv->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK - + UCODE_ID_CP_MEC_JT1_MASK - + UCODE_ID_CP_MEC_JT2_MASK; - - if (fiji_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_LoadUcodes, fw_to_load)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); - - return 0; -} - - -/* Check if the FW has been loaded, SMU will not return - * if loading has not finished. - */ -static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr, - uint32_t fw_type) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - uint32_t mask = fiji_get_mask_for_firmware_type(fw_type); - - /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */ - if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX, - priv->soft_regs_start + - offsetof(SMU73_SoftRegisters, UcodeLoadStatus), - mask, mask)) { - printk(KERN_ERR "check firmware loading failed\n"); - return -EINVAL; - } - return 0; -} - - -static int fiji_reload_firmware(struct pp_smumgr *smumgr) -{ - return smumgr->smumgr_funcs->start_smu(smumgr); -} - -static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr) -{ - uint32_t value; - - value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER); - if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) { - /* driver reads on SR-IOV enabled PF: 0x80000000 - * driver reads on SR-IOV enabled VF: 0x80000001 - * driver reads on SR-IOV disabled: 0x00000000 - */ - return true; - } - return false; -} - -static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type) -{ - if (fiji_is_hw_virtualization_enabled(smumgr)) { - uint32_t masks = fiji_get_mask_for_firmware_type(fw_type); - if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr, - PPSMC_MSG_LoadUcodes, masks)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); - } - /* For non-virtualization cases, - * SMU loads all FWs at once in fiji_request_smu_load_fw. 
- */ - return 0; -} - static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) { int result = 0; @@ -583,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = fiji_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result) return result; @@ -622,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, - SMU_STATUS, SMU_PASS)) { + if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMU_STATUS, SMU_PASS) != 1) { PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); } @@ -651,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = fiji_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result) return result; /* Set smc instruct start point at 0x0 */ - fiji_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); /* Enable clock */ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, @@ -710,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; if (priv->avfs.AvfsBtcParam) { - if (!fiji_send_msg_to_smc_with_parameter(smumgr, + if (!smum_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { - if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { + if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; result = 0; } else { printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt" " to Enable AVFS Failed!"); - fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); + smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); result = -1; } } else { @@ -748,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ - PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, PmFuseTable), &table_start, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate " @@ -760,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) inversion_voltage_addr = table_start + offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); - result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr, + result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr, (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " "be populated.", return -1;); - result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr, + result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr, (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " "charz_freq could not be populated.", return -1;); @@ -781,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) uint32_t level_addr, vr_config_addr; uint32_t level_size = 
sizeof(avfs_graphics_level); - PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &table_start, 0x40000), @@ -796,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_addr = table_start + offsetof(SMU73_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr, (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " "vr_config value over to SMC", @@ -804,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr, (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", return -1;); @@ -851,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) break; case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, 0x666), "[AVFS][fiji_avfs_event_mgr] SMU did not respond " "correctly to VftTableIsValid Msg", return -1;); priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs), "[AVFS][fiji_avfs_event_mgr] SMU did not respond " "correctly to EnableAvfs Message Msg", @@ -910,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr) struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); /* Only start SMC if SMC RAM is not running */ - if (!fiji_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(smumgr)) { fiji_avfs_event_mgr(smumgr, false); /* Check if SMU is running in protected mode */ @@ -941,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr) /* Setup SoftRegsStart here for register lookup in case * DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - fiji_read_smc_sram_dword(smumgr, + smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), - &(priv->soft_regs_start), 0x40000); + &(priv->smu7_data.soft_regs_start), 0x40000); - result = fiji_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(smumgr); return result; } @@ -975,29 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) static int fiji_smu_init(struct pp_smumgr *smumgr) { struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - uint64_t mc_addr; int i; - priv->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - smu_allocate_memory(smumgr->device, - priv->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &priv->header_buffer.kaddr, - &priv->header_buffer.handle); - - priv->header = priv->header_buffer.kaddr; - priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != priv->header), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - 
(cgs_handle_t)priv->header_buffer.handle); - return -1); + if (smu7_init(smumgr)) + return -EINVAL; priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; if (fiji_is_hw_avfs_present(smumgr)) @@ -1012,38 +479,22 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) else priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; - priv->acpi_optimization = 1; - for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) priv->activity_target[i] = 30; return 0; } -static int fiji_smu_fini(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - - smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); - - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} static const struct pp_smumgr_func fiji_smu_funcs = { .smu_init = &fiji_smu_init, - .smu_fini = &fiji_smu_fini, + .smu_fini = &smu7_smu_fini, .start_smu = &fiji_start_smu, - .check_fw_load_finish = &fiji_check_fw_load_finish, - .request_smu_load_fw = &fiji_reload_firmware, - .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load, - .send_msg_to_smc = &fiji_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter, + .check_fw_load_finish = &smu7_check_fw_load_finish, + .request_smu_load_fw = &smu7_reload_firmware, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .update_smc_table = fiji_update_smc_table, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h index 291f7042a585..adcbdfb209be 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h @@ -25,35 +25,20 @@ #include "smu73_discrete.h" #include +#include "smu7_smumgr.h" + -#define SMC_RAM_END 0x40000 struct fiji_smu_avfs { enum AVFS_BTC_STATUS AvfsBtcStatus; uint32_t AvfsBtcParam; }; -struct fiji_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; -}; struct fiji_smumgr { - uint8_t *header; - uint8_t *mec_image; + struct smu7_smumgr smu7_data; - uint32_t soft_regs_start; - uint32_t dpm_table_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; struct fiji_smu_avfs avfs; - uint32_t acpi_optimization; - struct fiji_buffer_entry header_buffer; - struct SMU73_Discrete_DpmTable smc_state_table; struct SMU73_Discrete_Ulv ulv_setting; struct SMU73_Discrete_PmFuses power_tune_table; @@ -62,13 +47,7 @@ struct fiji_smumgr { }; -int fiji_smum_init(struct pp_smumgr *smumgr); -int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, - uint32_t *value, uint32_t limit); -int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, - uint32_t value, uint32_t limit); -int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress, - const uint8_t *src, uint32_t byteCount, uint32_t limit); + #endif From 9c6d4956964d4b0282078dc348ca788dc3189d53 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 23 Aug 2016 11:57:30 +0800 Subject: [PATCH 19/49] drm/amd/powerplay: use smu7 common functions and data on Iceland.
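Like the Fiji and Polaris10 patches before it, this change drops Iceland's private copies of the SMC SRAM accessors in favor of the shared smu7 helpers. A minimal sketch of the resulting calling pattern, using only the smu7_read_smc_sram_dword()/smu7_copy_bytes_to_smc() signatures visible in the hunks below; the wrapper name and its parameters are illustrative and not part of this patch:

    /* Look up a table offset in the SMU71 firmware header, then push the
     * host-side copy of that table into SMC SRAM through the common helper,
     * which handles the MSB-first 4-byte packing and the trailing
     * read-modify-write internally.
     */
    static int iceland_upload_table_sketch(struct pp_smumgr *smumgr,
                                           uint32_t header_member_offset,
                                           uint8_t *table, uint32_t size,
                                           uint32_t limit)
    {
            uint32_t smc_offset;

            if (smu7_read_smc_sram_dword(smumgr,
                            SMU71_FIRMWARE_HEADER_LOCATION + header_member_offset,
                            &smc_offset, limit))
                    return -EINVAL;

            return smu7_copy_bytes_to_smc(smumgr, smc_offset, table, size, limit);
    }

The fan table upload in iceland_thermal.c and the DPM/MC register uploads in iceland_hwmgr.c follow essentially this read-offset-then-copy shape after the conversion.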
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 50 +- .../amd/powerplay/hwmgr/iceland_powertune.c | 6 +- .../drm/amd/powerplay/hwmgr/iceland_thermal.c | 2 +- .../drm/amd/powerplay/smumgr/iceland_smumgr.c | 598 ++---------------- .../drm/amd/powerplay/smumgr/iceland_smumgr.h | 35 +- 5 files changed, 82 insertions(+), 609 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c index 5abe43360ec0..50aa23f15540 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -767,12 +767,7 @@ int iceland_program_voting_clients(struct pp_hwmgr *hwmgr) static int iceland_upload_firmware(struct pp_hwmgr *hwmgr) { - int ret = 0; - - if (!iceland_is_smc_ram_running(hwmgr->smumgr)) - ret = iceland_smu_upload_firmware_image(hwmgr->smumgr); - - return ret; + return 0; } /** @@ -789,7 +784,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = 0; - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, DpmTable), &tmp, data->sram_end); @@ -800,7 +795,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, SoftRegisters), &tmp, data->sram_end); @@ -812,7 +807,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, mcRegisterTable), &tmp, data->sram_end); @@ -821,7 +816,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) data->mc_reg_table_start = tmp; } - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, FanTable), &tmp, data->sram_end); @@ -832,7 +827,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), &tmp, data->sram_end); @@ -844,7 +839,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, Version), &tmp, data->sram_end); @@ -855,7 +850,7 @@ static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = iceland_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, UlvSettings), &tmp, data->sram_end); @@ -1507,7 +1502,7 @@ int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } if (0 == result) { - result = iceland_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, data->arb_table_start, (uint8_t *)&arb_regs, @@ -2438,7 +2433,7 @@ static int iceland_populate_all_graphic_levels(struct 
pp_hwmgr *hwmgr) data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; /* level count will send to smc once at init smc table and never change*/ - result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); if (0 != result) return result; @@ -2492,7 +2487,7 @@ static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = iceland_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); if (0 != result) { @@ -2754,7 +2749,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController), @@ -2764,7 +2759,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to upload dpm data to SMC memory!", return result); /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ - result = iceland_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->ulv_settings_start, (uint8_t *)&(data->ulv_setting), sizeof(SMU71_Discrete_Ulv), @@ -2884,7 +2879,7 @@ int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for driver state!", return result;); - return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end); } @@ -3047,15 +3042,6 @@ static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } -static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr) -{ - int ret = 0; - - if (!iceland_is_smc_ram_running(hwmgr->smumgr)) - ret = iceland_smu_start_smc(hwmgr->smumgr); - - return ret; -} /** * Programs the Deep Sleep registers @@ -3141,10 +3127,6 @@ static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate PM fuses!", return tmp_result); - /* start SMC */ - tmp_result = iceland_tf_start_smc(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start SMC!", return tmp_result); /* enable SCLK control */ tmp_result = iceland_enable_sclk_control(hwmgr); @@ -4636,7 +4618,7 @@ static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - result = iceland_copy_bytes_to_smc( + result = smu7_copy_bytes_to_smc( hwmgr->smumgr, data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -4670,7 +4652,7 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) address = 
data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); - return iceland_copy_bytes_to_smc(hwmgr->smumgr, address, + return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, (uint8_t *)&data->mc_reg_table.data[0], sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, data->sram_end); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c index 041e9648e592..766280626836 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c @@ -239,7 +239,7 @@ static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offs const struct iceland_pt_defaults *defaults = data->power_tune_defaults; uint32_t temp; - if (iceland_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, fuse_table_offset + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, data->sram_end)) @@ -299,7 +299,7 @@ int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (iceland_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr->smumgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, data->sram_end)) @@ -359,7 +359,7 @@ int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", return -EINVAL); - if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&data->power_tune_table, sizeof(struct SMU71_Discrete_PmFuses), data->sram_end)) PP_ASSERT_WITH_CODE(false, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c index 527f37022424..45d17d715640 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c @@ -426,7 +426,7 @@ int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void //fan_table.FanControl_GL_Flag = 1; - res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); /* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index f50658332d9d..31b6de858317 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -36,119 +36,8 @@ #include "smu/smu_7_1_1_sh_mask.h" #include "cgs_common.h" -#define ICELAND_SMC_SIZE 0x20000 -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /*128 *1024*/ +#define ICELAND_SMC_SIZE 0x20000 -/** - * Set the address for reading/writing the SMC SRAM space. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. 
- */ -static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t limit) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), - "SMC address must be 4 byte aligned.", - return -1;); - - PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), - "SMC address is beyond the SMC RAM area.", - return -1;); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - - return 0; -} - -/** - * Copy bytes from an array into the SMC RAM space. - * - * @param smumgr the address of the powerplay SMU manager. - * @param smcStartAddress the start address in the SMC RAM to copy bytes to. - * @param src the byte array to copy the bytes from. - * @param byteCount the number of bytes to copy. - */ -int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), - "SMC address must be 4 byte aligned.", - return 0;); - - PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), - "SMC address is beyond the SMC RAM area.", - return 0;); - - addr = smcStartAddress; - - while (byteCount >= 4) { - /* - * Bytes are written into the - * SMC address space with the MSB first - */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = iceland_set_smc_sram_address(smumgr, addr, limit); - - if (result) - goto out; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - - src += 4; - byteCount -= 4; - addr += 4; - } - - if (0 != byteCount) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = iceland_set_smc_sram_address(smumgr, addr, limit); - if (result) - goto out; - - orig_data = cgs_read_register(smumgr->device, - mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byteCount); - - while (byteCount > 0) { - data = (data << 8) + *src++; - byteCount--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = iceland_set_smc_sram_address(smumgr, addr, limit); - if (result) - goto out; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - } - -out: - return result; -} - -/** - * Deassert the reset'pin' (set it to high). - * - * @param smumgr the address of the powerplay hardware manager. - */ static int iceland_start_smc(struct pp_smumgr *smumgr) { SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, @@ -157,284 +46,15 @@ static int iceland_start_smc(struct pp_smumgr *smumgr) return 0; } -static void iceland_pp_reset_smc(struct pp_smumgr *smumgr) +static void iceland_reset_smc(struct pp_smumgr *smumgr) { SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); } -int iceland_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; - iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); - - return 0; -} - -/** - * Return if the SMC is currently running. - * - * @param smumgr the address of the powerplay hardware manager. 
- */ -bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - uint32_t val1, val2; - - val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, - SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, - ixSMC_PC_C); - - return ((0 == val1) && (0x20100 <= val2)); -} - -/** - * Send a message to the SMC, and wait for its response. - * - * @param smumgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return The response that came from the SMC. - */ -static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - if (!iceland_is_smc_ram_running(smumgr)) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Previous Message.", - ); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Message.", - ); - - return 0; -} - -/** - * Send a message to the SMC with parameter - * - * @param smumgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return The response that came from the SMC. - */ -static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return iceland_send_msg_to_smc(smumgr, msg); -} - -/* - * Read a 32bit value from the SMC SRAM space. - * ALL PARAMETERS ARE IN HOST BYTE ORDER. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. - * @param value and output parameter for the data read from the SMC SRAM. - */ -int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t *value, - uint32_t limit) -{ - int result; - - result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); - - if (0 != result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - - return 0; -} - -/* - * Write a 32bit value to the SMC SRAM space. - * ALL PARAMETERS ARE IN HOST BYTE ORDER. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. - * @param value to write to the SMC SRAM. 
- */ -int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t value, - uint32_t limit) -{ - int result; - - result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); - - return 0; -} - -static int iceland_smu_fini(struct pp_smumgr *smumgr) -{ - struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend); - - smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); - - if (smumgr->backend != NULL) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} - -static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} - -/** - * Convert the PPIRI firmware type to SMU type mask. - * For MEC, we need to check all MEC related type - */ -static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType) -{ - uint16_t result = 0; - - switch (firmwareType) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - break; - } - - return result; -} - -/** - * Check if the FW has been loaded, - * SMU will not return if loading has not finished. 
-*/ -static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType) -{ - uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType); - - if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, - SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) { - pr_err("[ powerplay ] check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -/* Populate one firmware image to the data structure */ -static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint16_t firmware_type, - struct SMU_Entry *pentry) -{ - int result; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info( - smumgr->device, - iceland_convert_fw_type_to_cgs(firmware_type), - &info); - - if (result == 0) { - pentry->version = 0; - pentry->id = (uint16_t)firmware_type; - pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); - pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); - pentry->meta_data_addr_high = 0; - pentry->meta_data_addr_low = 0; - pentry->data_size_byte = info.image_size; - pentry->num_register_entries = 0; - - if (firmware_type == UCODE_ID_RLC_G) - pentry->flags = 1; - else - pentry->flags = 0; - } else { - return result; - } - - return result; -} - -static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr) +static void iceland_stop_smc_clock(struct pp_smumgr *smumgr) { SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, @@ -448,10 +68,10 @@ static void iceland_start_smc_clock(struct pp_smumgr *smumgr) ck_disable, 0); } -int iceland_smu_start_smc(struct pp_smumgr *smumgr) +static int iceland_smu_start_smc(struct pp_smumgr *smumgr) { /* set smc instruct start point at 0x0 */ - iceland_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); /* enable smc clock */ iceland_start_smc_clock(smumgr); @@ -465,17 +85,37 @@ int iceland_smu_start_smc(struct pp_smumgr *smumgr) return 0; } -/** - * Upload the SMC firmware to the SMC microcontroller. - * - * @param smumgr the address of the powerplay hardware manager. - * @param pFirmware the data structure containing the various sections of the firmware. 
- */ -int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) + +static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, + uint32_t length, const uint8_t *src, + uint32_t limit, uint32_t start_addr) { - const uint8_t *src; - uint32_t byte_count, val; + uint32_t byte_count = length; uint32_t data; + + PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + while (byte_count >= 4) { + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + src += 4; + byte_count -= 4; + } + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); + + return 0; +} + + +static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +{ + uint32_t val; struct cgs_firmware_info info = {0}; if (smumgr == NULL || smumgr->device == NULL) @@ -483,7 +123,7 @@ int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) /* load SMC firmware */ cgs_get_firmware_info(smumgr->device, - iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (info.image_size & 3) { pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); @@ -506,122 +146,17 @@ int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) ixSMC_SYSCON_MISC_CNTL, val | 1); /* stop smc clock */ - iceland_pp_stop_smc_clock(smumgr); + iceland_stop_smc_clock(smumgr); /* reset smc */ - iceland_pp_reset_smc(smumgr); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, - info.ucode_start_address); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, - AUTO_INCREMENT_IND_0, 1); - - byte_count = info.image_size; - src = (const uint8_t *)info.kptr; - - while (byte_count >= 4) { - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - src += 4; - byte_count -= 4; - } - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, - AUTO_INCREMENT_IND_0, 0); + iceland_reset_smc(smumgr); + iceland_upload_smc_firmware_data(smumgr, info.image_size, + (uint8_t *)info.kptr, ICELAND_SMC_SIZE, + info.ucode_start_address); return 0; } -static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr) -{ - struct iceland_smumgr *iceland_smu = - (struct iceland_smumgr *)(smumgr->backend); - uint16_t fw_to_load; - int result = 0; - struct SMU_DRAMData_TOC *toc; - - toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry(smumgr, - UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", - return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", - return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == 
iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == iceland_populate_single_firmware_entry - (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - - if (!iceland_is_smc_ram_running(smumgr)) { - result = iceland_smu_upload_firmware_image(smumgr); - if (result) - return result; - - result = iceland_smu_start_smc(smumgr); - if (result) - return result; - } - - iceland_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DRV_DRAM_ADDR_HI, - iceland_smu->header_buffer.mc_addr_high); - - iceland_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DRV_DRAM_ADDR_LO, - iceland_smu->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK - + UCODE_ID_CP_MEC_JT1_MASK - + UCODE_ID_CP_MEC_JT2_MASK; - - PP_ASSERT_WITH_CODE( - 0 == iceland_send_msg_to_smc_with_parameter( - smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), - "Fail to Request SMU Load uCode", return 0); - - return result; -} - static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, uint32_t firmwareType) { @@ -635,12 +170,22 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) result = iceland_smu_upload_firmware_image(smumgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); if (result) return result; - result = iceland_request_smu_reload_fw(smumgr); + if (!smu7_is_smc_ram_running(smumgr)) { + printk("smu not running, upload firmware again \n"); + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + } + + result = smu7_request_smu_load_fw(smumgr); return result; } @@ -654,45 +199,18 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) */ static int iceland_smu_init(struct pp_smumgr *smumgr) { - struct iceland_smumgr *iceland_smu; - uint64_t mc_addr = 0; - - /* Allocate memory for backend private data */ - iceland_smu = (struct iceland_smumgr *)(smumgr->backend); - iceland_smu->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - - smu_allocate_memory(smumgr->device, - iceland_smu->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &iceland_smu->header_buffer.kaddr, - &iceland_smu->header_buffer.handle); - - iceland_smu->pHeader = iceland_smu->header_buffer.kaddr; - iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)iceland_smu->header_buffer.handle); - return -1); - - return 0; + return smu7_init(smumgr); } static 
const struct pp_smumgr_func iceland_smu_funcs = { .smu_init = &iceland_smu_init, - .smu_fini = &iceland_smu_fini, + .smu_fini = &smu7_smu_fini, .start_smu = &iceland_start_smu, - .check_fw_load_finish = &iceland_check_fw_load_finish, - .request_smu_load_fw = &iceland_request_smu_reload_fw, + .check_fw_load_finish = &smu7_check_fw_load_finish, + .request_smu_load_fw = &smu7_reload_firmware, .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, - .send_msg_to_smc = &iceland_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter, + .send_msg_to_smc = &smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h index 62009a7ae827..331e2782e5b4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -26,39 +26,12 @@ #ifndef _ICELAND_SMUMGR_H_ #define _ICELAND_SMUMGR_H_ -struct iceland_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; -}; -/* Iceland only has header_buffer, don't have smu buffer. */ +#include "smu7_smumgr.h" + + struct iceland_smumgr { - uint8_t *pHeader; - uint8_t *pMecImage; - uint32_t ulSoftRegsStart; - - struct iceland_buffer_entry header_buffer; + struct smu7_smumgr smu7_data; }; -extern int iceland_smum_init(struct pp_smumgr *smumgr); -extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, - const uint8_t *src, - uint32_t byteCount, uint32_t limit); - -extern int iceland_smu_start_smc(struct pp_smumgr *smumgr); - -extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, - uint32_t *value, uint32_t limit); -extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, - uint32_t value, uint32_t limit); - -extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr); -extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr); - #endif From 18aafc59b1067185f80597fc3331e5117c63834d Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 23 Aug 2016 11:58:14 +0800 Subject: [PATCH 20/49] drm/amd/powerplay: implement fw related smu interface for iceland. 
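Add iceland_smc.c/iceland_smc.h under smumgr to carry the SMU71 firmware table programming for Iceland: power tune defaults selected by device ID, the VDDC/VDDCI/MVDD voltage tables, ULV, link, graphics and memory DPM levels, the ACPI level and the MC ARB timing entries, all written to SMC RAM through the common smu7 helpers. The duplicated iceland_pt_defaults definition is dropped from iceland_powertune.h and the new file is hooked into the smumgr Makefile.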
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../amd/powerplay/hwmgr/iceland_powertune.h | 14 - drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 5 +- .../drm/amd/powerplay/smumgr/iceland_smc.c | 2576 +++++++++++++++++ .../drm/amd/powerplay/smumgr/iceland_smc.h | 40 + .../drm/amd/powerplay/smumgr/iceland_smumgr.c | 21 +- .../drm/amd/powerplay/smumgr/iceland_smumgr.h | 34 + 6 files changed, 2673 insertions(+), 17 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h index 6c25ee139ca3..4008d49617e4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h @@ -49,20 +49,6 @@ struct iceland_pt_config_reg { enum iceland_pt_config_reg_type type; }; -struct iceland_pt_defaults -{ - uint8_t svi_load_line_en; - uint8_t svi_load_line_vddc; - uint8_t tdc_vddc_throttle_release_limit_perc; - uint8_t tdc_mawt; - uint8_t tdc_waterfall_ctl; - uint8_t dte_ambient_temp_base; - uint32_t display_cac; - uint32_t bamp_temp_gradient; - uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; - uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; -}; - void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index a4d7462668d0..51ff08301651 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -2,8 +2,9 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o\ - polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o smu7_smumgr.o +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ + polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ + smu7_smumgr.o iceland_smc.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c new file mode 100644 index 000000000000..40f18685a7f4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -0,0 +1,2576 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * + */ + +#include "iceland_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "pppcielanes.h" +#include "pp_endian.h" +#include "smu7_ppsmc.h" + +#include "smu71_discrete.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "processpptables.h" + +#include "iceland_smumgr.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define MC_CG_ARB_FREQ_F1 0x0b +#define VDDC_VDDCI_DELTA 200 + +#define DEVICE_ID_VI_ICELAND_M_6900 0x6900 +#define DEVICE_ID_VI_ICELAND_M_6901 0x6901 +#define DEVICE_ID_VI_ICELAND_M_6902 0x6902 +#define DEVICE_ID_VI_ICELAND_M_6903 0x6903 + +static struct iceland_pt_defaults defaults_iceland = { + /* + * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, + * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +/* 35W - XT, XTL */ +static struct iceland_pt_defaults defaults_icelandxt = { + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +/* 25W - PRO, LE */ +static struct iceland_pt_defaults defaults_icelandpro = { + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + switch (dev_id) { + case DEVICE_ID_VI_ICELAND_M_6900: + case DEVICE_ID_VI_ICELAND_M_6903: + smu_data->power_tune_defaults = &defaults_icelandxt; + break; + + case DEVICE_ID_VI_ICELAND_M_6901: + case DEVICE_ID_VI_ICELAND_M_6902: + smu_data->power_tune_defaults = &defaults_icelandpro; + break; + default: + 
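+ /* unrecognized Iceland device id: fall back to the baseline power tune defaults */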
smu_data->power_tune_defaults = &defaults_iceland; + pr_warning("Unknown V.I. Device ID.\n"); + break; + } + return; +} + +static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 8; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + } + } else { + PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL); + } + + return 0; +} + +static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint8_t *vid = smu_data->power_tune_table.VddCVid; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) { + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + } + + return 0; +} + + + +static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW0 - DW3 */ + if (iceland_populate_bapm_vddc_vid_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate bapm vddc vid Failed!", + return -EINVAL); + + /* DW4 - DW5 */ + if (iceland_populate_vddc_vid(hwmgr)) + 
PP_ASSERT_WITH_CODE(false, + "Attempt to populate vddc vid Failed!", + return -EINVAL); + + /* DW6 */ + if (iceland_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (iceland_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 */ + if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != iceland_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW16 */ + if (iceland_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW17 */ + if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW18 */ + if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, uint32_t *vol) +{ + uint32_t i = 0; + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *vol = allowed_clock_voltage_table->entries[i - 1].v; + + return 0; +} + +static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + /* SCLK/VDDC Dependency Table has to exist. */ + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warning("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + /* + * Since voltage in the sclk/vddc dependency table is not + * necessarily in ascending order because of ELB voltage + * patching, loop through entire list to find exact voltage. 
+ */ + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + /* + * If voltage is not found in the first pass, loop again to + * find the best match, equal or higher value. + */ + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU71_Discrete_VoltageLevel *smc_voltage_tab) +{ + int result; + + result = iceland_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, + &smc_voltage_tab->StdVoltageLoSidd); + if (0 != result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + + return 0; +} + +static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &(data->vddc_voltage_table.entries[count]), + &(table->VddcLevel[count])); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + 
table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->VddciLevelCount = data->vddci_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &(data->vddci_voltage_table.entries[count]), + &(table->VddciLevel[count])); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->MvddLevelCount = data->mvdd_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &(data->mvdd_voltage_table.entries[count]), + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + + +static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + + result = iceland_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -EINVAL); + + result = iceland_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -EINVAL); + + result = iceland_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -EINVAL); + + return 0; +} + +static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU71_Discrete_Ulv *state) +{ + uint32_t voltage_response_time, ulv_voltage; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); + + if (ulv_voltage == 0) { + data->ulv_supported = false; + return 0; + } + + if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffset = 0; + else + /* used in SMIO Mode. not implemented for now. this is backup only for CI. 
*/ + state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffsetVid = 0; + else /* used in SVI2 Mode */ + state->VddcOffsetVid = (uint8_t)( + (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) + * VOLTAGE_VID_OFFSET_SCALE2 + / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, + SMU71_Discrete_Ulv *ulv_level) +{ + return iceland_populate_ulv_level(hwmgr, ulv_level); +} + +static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +/** + * Calculates the SCLK dividers using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t reference_clock; + uint32_t reference_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ + reference_clock = atomctrl_get_reference_clock(hwmgr); + + reference_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider*/ + fbdiv = 
dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup*/ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + + uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; + if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + */ + /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ + uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); + + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); + + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = + PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, + const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (sclk < pl->entries[i].Sclk) { + *p_shed = i; + break; + } + } + return 0; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint16_t sclk_activity_level_threshold, + SMU71_Discrete_GraphicsLevel *graphic_level) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + /* populate graphics levels*/ + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, + &graphic_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for VDDC \ + engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + graphic_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) + iceland_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + engine_clock, + &graphic_level->MinVddcPhases); + + /* Indicates maximum activity 
level for this performance level. 50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 100; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + data->display_timing.min_clock_in_sr = + hwmgr->display_config.min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + graphic_level->DeepSleepDivId = + smu7_get_sleep_divider_id_from_clock(engine_clock, + data->display_timing.min_clock_in_sr); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * + SMU71_MAX_LEVELS_GRAPHICS; + + SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; + + uint32_t i; + uint8_t highest_pcie_level_enabled = 0; + uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0; + uint8_t count = 0; + int result = 0; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = iceland_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result != 0) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now. 
*/ + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + if (dpm_table->sclk_table.count > 1) + smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (highest_pcie_level_enabled + 1))) != 0) { + highest_pcie_level_enabled++; + } + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0) { + lowest_pcie_level_enabled++; + } + + while ((count < highest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) { + count++; + } + + mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? + (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled; + + + /* set pcieDpmLevel to highest_pcie_level_enabled*/ + for (i = 2; i < dpm_table->sclk_table.count; i++) { + smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; + } + + /* set pcieDpmLevel to lowest_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; + + /* level count will send to smc once at init smc table and never change*/ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, + (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + 
MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss.Info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t 
iceland_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) { + mc_para_index = 0x00; + } else if (memory_clock > 47500) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } + } else { + if (memory_clock < 65000) { + mc_para_index = 0x00; + } else if (memory_clock > 135000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + } + + return mc_para_index; +} + +static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) { + mc_para_index = 0; + } else if (memory_clock >= 80000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + } + + return mc_para_index; +} + +static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + *p_shed = i; + break; + } + } + + return 0; +} + +static int iceland_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *memory_level + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) { + memory_level->MinVddci = memory_level->MinVddc; + } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 0; + memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. 
Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + /* stutter mode not support on iceland */ + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + else + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } else + dll_state_on = data->dll_default_on; + } else { + memory_level->StrobeRatio = + iceland_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } + + result = iceland_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +/** + * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states + * + * @param hwmgr the address of the hardware manager + */ + +int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + + /* populate MCLK dpm table to SMU7 */ + uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY; + SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + 
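+ /* Build one SMU71_Discrete_MemoryLevel entry per MCLK DPM level; a zero memory clock in the dpm table is rejected below. */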
+ for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", return -EINVAL); + result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (0 != result) { + return result; + } + } + + /* Only enable level 0 for now.*/ + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in a higher state + * by default such that we are not effected by up threshold or and MCLK DPM latency. + */ + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change*/ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, + SMU71_Discrete_VoltageLevel *voltage) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -EINVAL); + + } else { + return -EINVAL; + } + + return 0; +} + +static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t vddc_phase_shed_control = 0; + + SMU71_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 
0 : 1;
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+
PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. 
+ */ +static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + int result = 0; + SMU71_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = iceland_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) { + break; + } + } + } + + if (0 == result) { + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU71_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + in dependency table. Using Graphics DPM level 0!"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.MemoryBootLevel = 0; + printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + in dependency table. 
Using Memory DPM level 0!"); + result = 0; + } + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + table->BootVddci = table->BootVddc; + else + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + return result; +} + +static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr, + SMU71_Discrete_MCRegisters *mc_reg_table) +{ + const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend; + + uint32_t i, j; + + for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { + if (smu_data->mc_reg_table.validflag & 1<address[] array out of boundary", return -EINVAL); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/*convert register values from driver to SMC format */ +static void iceland_convert_mc_registers( + const struct iceland_mc_reg_entry *entry, + SMU71_Discrete_MCRegisterSet *data, + uint32_t num_entries, uint32_t valid_flag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & 1<value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); + i++; + } + } +} + +static int iceland_convert_mc_reg_table_entry_to_smc( + struct pp_smumgr *smumgr, + const uint32_t memory_clock, + SMU71_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + uint32_t i = 0; + + for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { + if (memory_clock <= + smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) + --i; + + iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, smu_data->mc_reg_table.last, + smu_data->mc_reg_table.validflag); + + return 0; +} + +static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU71_Discrete_MCRegisters *mc_regs) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = iceland_convert_mc_reg_table_entry_to_smc( + hwmgr->smumgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_regs->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters)); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); + + if (result != 0) + return result; + + + address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); + + return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, + (uint8_t *)&smu_data->mc_regs.data[0], + sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + SMC_RAM_END); +} + +static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + 
struct pp_smumgr *smumgr = hwmgr->smumgr; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + + memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters)); + result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END); +} + +static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + uint16_t *def1, *def2; + int i, j, k; + + + /* + * TDP number of fraction bits are changed from 8 to 7 for Iceland + * as requested by SMC team + */ + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + + dpm_table->DTETjOffset = 0; + + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + /* The following are for new Iceland Multi-input fan/thermal control */ + if (NULL != ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU71_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU71_DTE_SOURCES; j++) { + for (k = 0; k < SMU71_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = 
PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *tab) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + tab->SVI2Enable |= VDDC_ON_SVI2; + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + tab->SVI2Enable |= VDDCI_ON_SVI2; + else + tab->MergedVddci = 1; + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) + tab->SVI2Enable |= MVDD_ON_SVI2; + + PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) && + (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL); + + return 0; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int iceland_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table); + + + iceland_initialize_power_tune_defaults(hwmgr); + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) { + iceland_populate_smc_voltage_tables(hwmgr, table); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + + if (data->ulv_supported) { + result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = iceland_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result;); + + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result;); + + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result;); + + result = iceland_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result;); + + result = iceland_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result;); + + result = iceland_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result;); + + result = iceland_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result;); + + /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ + /* need to populate the ARB settings for the initial state. 
*/ + result = iceland_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result;); + + result = iceland_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result;); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + result = iceland_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result;); + + result = iceland_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + + result = iceland_populate_smc_svi2_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate SVI2 setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result;); + + /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.ulv_setting_starts, + (uint8_t *)&(smu_data->ulv_setting), + sizeof(SMU71_Discrete_Ulv), + SMC_RAM_END); + + + result = iceland_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize MC Reg table!", return result); + + result = iceland_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + 
+ return 0; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == smu7_data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + /* fan_table.FanControl_GL_Flag = 1; */ + + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + + return 0; +} + + +static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if 
(data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return iceland_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = iceland_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result); + + result = iceland_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU71_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU71_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU71_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU71_SoftRegisters, UcodeLoadStatus); + } + case SMU_Discrete_DpmTable: + switch (member) { + case LowSclkInterruptThreshold: + return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("cant't get the offset of type %x member %x \n", type, member); + return 0; +} + +uint32_t iceland_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU71_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU71_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU71_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU71_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU71_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDCI: + return SMU71_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU71_MAX_LEVELS_MVDD; + } + + printk("cant't get the mac of %x \n", value); + return 0; +} + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->dpm_table_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (0 == result) { + data->soft_regs_start = tmp; + smu7_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->mc_reg_table_start = tmp; + } + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->fan_table_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->arb_table_start = tmp; + } + + error |= (0 != result); + + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (0 == result) { + hwmgr->microcode_version_info.SMC = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, UlvSettings), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->ulv_setting_starts = tmp; + } + + error |= (0 != result); + + return error ? 
1 : 0; +} + +/*---------------------------MC----------------------------*/ + +static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? address : table->mc_reg_address[i].s1; + } + return 0; +} + +static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct iceland_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) { + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + } + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. + * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. 
need to set these data for each clock range
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param table the address of MCRegTable
+ * @return always 0
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j;
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = iceland_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + iceland_set_s0_mc_reg_index(ni_table); + result = iceland_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + iceland_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h new file mode 100644 index 000000000000..13c8dbbccaf2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h @@ -0,0 +1,40 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _ICELAND_SMC_H +#define _ICELAND_SMC_H + +#include "smumgr.h" + + +int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int iceland_init_smc_table(struct pp_hwmgr *hwmgr); +int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t iceland_get_offsetof(uint32_t type, uint32_t member); +uint32_t iceland_get_mac_definition(uint32_t value); +int iceland_process_firmware_header(struct pp_hwmgr *hwmgr); +int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr); +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 31b6de858317..eeafefc4acba 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -35,6 +35,7 @@ #include "smu/smu_7_1_1_d.h" #include "smu/smu_7_1_1_sh_mask.h" #include "cgs_common.h" +#include "iceland_smc.h" #define ICELAND_SMC_SIZE 0x20000 @@ -199,7 +200,15 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) */ static int iceland_smu_init(struct pp_smumgr *smumgr) { - return smu7_init(smumgr); + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + if (smu7_init(smumgr)) + return -EINVAL; + + for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++) + smu_data->activity_target[i] = 30; + + return 0; } static const struct pp_smumgr_func iceland_smu_funcs = { @@ -213,6 +222,16 @@ static const struct pp_smumgr_func iceland_smu_funcs = { .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .get_offsetof = iceland_get_offsetof, + .process_firmware_header = iceland_process_firmware_header, + .init_smc_table = iceland_init_smc_table, + .update_sclk_threshold = iceland_update_sclk_threshold, + .thermal_setup_fan_table = iceland_thermal_setup_fan_table, + .populate_all_graphic_levels = iceland_populate_all_graphic_levels, + .populate_all_memory_levels = iceland_populate_all_memory_levels, + .get_mac_definition = iceland_get_mac_definition, + .initialize_mc_reg_table = iceland_initialize_mc_reg_table, + .is_dpm_running = iceland_is_dpm_running, }; int iceland_smum_init(struct pp_smumgr *smumgr) diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h index 331e2782e5b4..cfadfeeea039 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -28,10 +28,44 @@ #include "smu7_smumgr.h" +#include "pp_endian.h" +#include "smu71_discrete.h" +struct iceland_pt_defaults { + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddc; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; + uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; +}; + +struct iceland_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct iceland_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/ + struct iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; struct iceland_smumgr { struct smu7_smumgr smu7_data; + struct SMU71_Discrete_DpmTable smc_state_table; + struct SMU71_Discrete_PmFuses power_tune_table; + struct SMU71_Discrete_Ulv ulv_setting; + struct iceland_pt_defaults *power_tune_defaults; + SMU71_Discrete_MCRegisters mc_regs; + struct iceland_mc_reg_table mc_reg_table; + uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS]; }; #endif From ab4f06d3adcc5165b13ed2e657050fd1808f319b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 25 Aug 2016 20:08:03 +0800 Subject: [PATCH 21/49] drm/amd/powerplay: use smu7 hwmgr to manager iceland Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4 +-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 33 +++++++++++++++----- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index 69e6d156a4c3..5fff1d636ab7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -7,9 +7,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ cz_clockpowergating.o pppcielanes.o\ process_pptables_v1_0.o ppatomctrl.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ - smu7_clockpowergating.o iceland_hwmgr.o \ - iceland_clockpowergating.o iceland_thermal.o \ - iceland_powertune.o + smu7_clockpowergating.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 7ca8aaa88444..1167205057b3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -36,13 +36,13 @@ #include "amd_acpi.h" extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr); static int 
tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr); uint8_t convert_to_vid(uint16_t vddc) { @@ -79,16 +79,18 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { case CHIP_TOPAZ: - iceland_hwmgr_init(hwmgr); + topaz_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | + PP_VBI_TIME_SUPPORT_MASK | + PP_ENABLE_GFX_CG_THRU_SMU); + hwmgr->pp_table_version = PP_TABLE_V0; break; case CHIP_TONGA: - smu7_hwmgr_init(hwmgr); tonga_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | PP_VBI_TIME_SUPPORT_MASK); break; case CHIP_FIJI: - smu7_hwmgr_init(hwmgr); fiji_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | PP_VBI_TIME_SUPPORT_MASK | @@ -96,13 +98,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case CHIP_POLARIS11: case CHIP_POLARIS10: - smu7_hwmgr_init(hwmgr); polaris_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; default: return -EINVAL; } + smu7_hwmgr_init(hwmgr); break; default: return -EINVAL; @@ -215,8 +217,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, } - - /** * Returns once the part of the register indicated by the mask has * reached the given value.The indirect space is described by giving @@ -794,3 +794,22 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) return 0; } + +int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); + return 0; +} From e805ed83ba1ca0961d19496c944faed27aef82a3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 19 Aug 2016 21:00:27 +0800 Subject: [PATCH 22/49] drm/amd/powerplay: delete useless files. 
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../powerplay/hwmgr/fiji_clockpowergating.c | 121 - .../powerplay/hwmgr/fiji_clockpowergating.h | 35 - .../amd/powerplay/hwmgr/fiji_dyn_defaults.h | 105 - .../gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 5601 --------------- .../gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h | 350 - .../drm/amd/powerplay/hwmgr/fiji_powertune.c | 610 -- .../drm/amd/powerplay/hwmgr/fiji_powertune.h | 81 - .../drm/amd/powerplay/hwmgr/fiji_thermal.c | 687 -- .../drm/amd/powerplay/hwmgr/fiji_thermal.h | 62 - .../hwmgr/iceland_clockpowergating.c | 119 - .../hwmgr/iceland_clockpowergating.h | 38 - .../powerplay/hwmgr/iceland_dyn_defaults.h | 41 - .../drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 5666 --------------- .../drm/amd/powerplay/hwmgr/iceland_hwmgr.h | 424 -- .../amd/powerplay/hwmgr/iceland_powertune.c | 490 -- .../amd/powerplay/hwmgr/iceland_powertune.h | 60 - .../drm/amd/powerplay/hwmgr/iceland_thermal.c | 595 -- .../drm/amd/powerplay/hwmgr/iceland_thermal.h | 58 - .../hwmgr/polaris10_clockpowergating.c | 444 -- .../hwmgr/polaris10_clockpowergating.h | 40 - .../powerplay/hwmgr/polaris10_dyn_defaults.h | 62 - .../drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 5290 -------------- .../drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | 354 - .../amd/powerplay/hwmgr/polaris10_powertune.c | 988 --- .../amd/powerplay/hwmgr/polaris10_powertune.h | 81 - .../amd/powerplay/hwmgr/polaris10_thermal.c | 716 -- .../amd/powerplay/hwmgr/polaris10_thermal.h | 62 - .../powerplay/hwmgr/tonga_clockpowergating.c | 350 - .../powerplay/hwmgr/tonga_clockpowergating.h | 36 - .../amd/powerplay/hwmgr/tonga_dyn_defaults.h | 107 - .../gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 6371 ----------------- .../gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h | 402 -- .../drm/amd/powerplay/hwmgr/tonga_powertune.c | 495 -- .../drm/amd/powerplay/hwmgr/tonga_powertune.h | 80 - .../drm/amd/powerplay/hwmgr/tonga_thermal.c | 590 -- .../drm/amd/powerplay/hwmgr/tonga_thermal.h | 61 - 36 files changed, 31672 deletions(-) delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h 
delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c deleted file mode 100644 index 5afe82068b29..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "hwmgr.h" -#include "fiji_clockpowergating.h" -#include "fiji_ppsmc.h" -#include "fiji_hwmgr.h" - -int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - data->acp_power_gated = false; - - return 0; -} - -int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->uvd_power_gated == bgate) - return 0; - - data->uvd_power_gated = bgate; - - if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - fiji_update_uvd_dpm(hwmgr, true); - } else { - fiji_update_uvd_dpm(hwmgr, false); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - } - - return 0; -} - -int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_set_power_state_input states; - const struct pp_power_state *pcurrent; - struct pp_power_state *requested; - - if (data->vce_power_gated == bgate) - return 0; - - data->vce_power_gated = bgate; - - pcurrent = hwmgr->current_ps; - requested = hwmgr->request_ps; - - states.pcurrent_state = &(pcurrent->hardware); - states.pnew_state = &(requested->hardware); - - fiji_update_vce_dpm(hwmgr, &states); - fiji_enable_disable_vce_dpm(hwmgr, !bgate); - - return 0; -} - -int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->samu_power_gated == bgate) - return 0; - - data->samu_power_gated = bgate; - - if (bgate) - fiji_update_samu_dpm(hwmgr, true); - else - fiji_update_samu_dpm(hwmgr, false); - - return 0; -} - -int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->acp_power_gated == bgate) - return 0; - - data->acp_power_gated = bgate; - - if (bgate) - fiji_update_acp_dpm(hwmgr, true); - else - fiji_update_acp_dpm(hwmgr, false); - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h deleted file mode 100644 index 33af5f511ab8..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _FIJI_CLOCK_POWER_GATING_H_ -#define _FIJI_CLOCK_POWER_GATING_H_ - -#include "fiji_hwmgr.h" -#include "pp_asicblocks.h" - -extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h deleted file mode 100644 index 32d43e8fecb2..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef FIJI_DYN_DEFAULTS_H -#define FIJI_DYN_DEFAULTS_H - -/** \file -* Volcanic Islands Dynamic default parameters. -*/ - -enum FIJIdpm_TrendDetection -{ - FIJIAdpm_TrendDetection_AUTO, - FIJIAdpm_TrendDetection_UP, - FIJIAdpm_TrendDetection_DOWN -}; -typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection; - -/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */ - -/* Bit vector representing same fields as hardware register. */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ???? - * HDP_busy - * IH_busy - * UVD_busy - * VCE_busy - * ACP_busy - * SAMU_busy - * SDMA enabled */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ???? - * SH_Gfx_busy - * RB_Gfx_busy - * VCE_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. - * FE_Gfx_busy - * RB_Gfx_busy - * ACP_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. 
- * FE_Gfx_busy - * SH_Gfx_busy - * UVD_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy - * VCE_busy - * ACP_busy - * SAMU_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */ - - -/* thermal protection counter (units). */ -#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */ - -/* static screen threshold unit */ -#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0 - -/* static screen threshold */ -#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8 - -/* gfx idle clock stop threshold */ -#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */ - -/* Fixed reference divider to use when building baby stepping tables. */ -#define PPFIJI_REFERENCEDIVIDER_DFLT 4 - -/* ULV voltage change delay time - * Used to be delay_vreg in N.I. split for S.I. - * Using N.I. delay_vreg value as default - * ReferenceClock = 2700 - * VoltageResponseTime = 1000 - * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687 - */ -#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035 -#define PPFIJI_CGULVCONTROL_DFLT 0x00007450 -#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/ -#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */ - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c deleted file mode 100644 index 0d4c99b9e3f9..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ /dev/null @@ -1,5601 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include -#include -#include -#include "linux/delay.h" - -#include "hwmgr.h" -#include "fiji_smumgr.h" -#include "atombios.h" -#include "hardwaremanager.h" -#include "ppatomctrl.h" -#include "atombios.h" -#include "cgs_common.h" -#include "fiji_dyn_defaults.h" -#include "fiji_powertune.h" -#include "smu73.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" -#include "pppcielanes.h" -#include "fiji_hwmgr.h" -#include "process_pptables_v1_0.h" -#include "pptable_v1_0.h" -#include "pp_debug.h" -#include "pp_acpi.h" -#include "amd_pcie_helpers.h" -#include "cgs_linux.h" -#include "ppinterrupt.h" - -#include "fiji_clockpowergating.h" -#include "fiji_thermal.h" - -#define VOLTAGE_SCALE 4 -#define SMC_RAM_END 0x40000 -#define VDDC_VDDCI_DELTA 300 - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - -#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */ -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -/* From smc_reg.h */ -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */ - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -#define VDDC_VDDCI_DELTA 300 - -#define ixSWRST_COMMAND_1 0x1400103 -#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000 - -/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ - DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ - DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ -}; - - -/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs - * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] - */ -static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = -{ {600, 1050, 3, 0}, {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and - * [Floor Freq, Boundary Freq, VID min , VID max] - */ -static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = -{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] - * (coming from PWR_CKS_CNTL.stretch_amount reg spec) - */ -static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = -{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; - -static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic); - -static struct fiji_power_state *cast_phw_fiji_power_state( - struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL;); - - return (struct fiji_power_state *)hw_ps; -} - -static const struct -fiji_power_state *cast_const_phw_fiji_power_state( - const struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL;); - - return (const struct fiji_power_state *)hw_ps; -} - -static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - return (1 == 
PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; -} - -static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_ulv_parm *ulv = &data->ulv; - - ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT; - data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7; - - data->static_screen_threshold_unit = - PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT; - data->static_screen_threshold = - PPFIJI_STATICSCREENTHRESHOLD_DFLT; - - /* Unset ABM cap as it moved to DAL. - * Add PHM_PlatformCaps_NonABMSupportInPPLib - * for re-direct ABM related request to DAL - */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ABM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicACTiming); - - fiji_initialize_power_tune_defaults(hwmgr); - - data->mclk_stutter_mode_threshold = 60000; - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); -} - -static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - uint16_t virtual_voltage_id, int32_t *sclk) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); - - /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ - for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { - voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; - if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) - break; - } - - PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, - "Can't find requested voltage id in vdd_dep_on_sclk table!", - return -EINVAL; - ); - - *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; - - return 0; -} - -/** -* Get Leakage VDDC based on leakage ID. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint16_t vv_id; - uint16_t vddc = 0; - uint16_t evv_default = 1150; - uint16_t i, j; - uint32_t sclk = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - int result; - - for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) { - vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - if (!fiji_get_sclk_for_voltage_evv(hwmgr, - table_info->vddc_lookup_table, vv_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - for (j = 1; j < sclk_table->count; j++) { - if (sclk_table->entries[j].clk == sclk && - sclk_table->entries[j].cks_enable == 0) { - sclk += 5000; - break; - } - } - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableDriverEVV)) - result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr, - VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true); - else - result = -EINVAL; - - if (result) - result = atomctrl_get_voltage_evv_on_sclk(hwmgr, - VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc); - - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */ - PP_ASSERT_WITH_CODE((vddc < 2000), - "Invalid VDDC value, greater than 2v!", result = -EINVAL;); - - if (result) - /* 1.15V is the default safe value for Fiji */ - vddc = evv_default; - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddc != 0 && vddc != vv_id) { - data->vddc_leakage.actual_voltage - [data->vddc_leakage.count] = vddc; - data->vddc_leakage.leakage_id - [data->vddc_leakage.count] = vv_id; - data->vddc_leakage.count++; - } - } - } - return 0; -} - -/** - * Change virtual leakage voltage to actual value. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pointer to changing voltage - * @param pointer to leakage table - */ -static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, - uint16_t *voltage, struct fiji_leakage_voltage *leakage_table) -{ - uint32_t index; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (index = 0; index < leakage_table->count; index++) { - /* if this voltage matches a leakage voltage ID */ - /* patch with actual leakage voltage */ - if (leakage_table->leakage_id[index] == *voltage) { - *voltage = leakage_table->actual_voltage[index]; - break; - } - } - - if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); -} - -/** -* Patch voltage lookup table by EVV leakages. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pointer to voltage lookup table -* @param pointer to leakage table -* @return always 0 -*/ -static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - struct fiji_leakage_voltage *leakage_table) -{ - uint32_t i; - - for (i = 0; i < lookup_table->count; i++) - fiji_patch_with_vdd_leakage(hwmgr, - &lookup_table->entries[i].us_vdd, leakage_table); - - return 0; -} - -static int fiji_patch_clock_voltage_limits_with_vddc_leakage( - struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table, - uint16_t *vddc) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); - hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = - table_info->max_clock_voltage_on_dc.vddc; - return 0; -} - -static int fiji_patch_voltage_dependency_tables_with_lookup_table( - struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = - table_info->vdd_dep_on_mclk; - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - voltageId = mclk_table->entries[entryId].vddInd; - mclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mm_table->count; ++entryId) { - voltageId = mm_table->entries[entryId].vddcInd; - mm_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - return 0; - -} - -static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage. */ - return 0; -} - -static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage from mm table. 
*/ - return 0; -} - -static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; - table_size = lookup_table->count; - - PP_ASSERT_WITH_CODE(0 != lookup_table->count, - "Lookup table is empty", return -EINVAL); - - /* Sorting voltages */ - for (i = 0; i < table_size - 1; i++) { - for (j = i + 1; j > 0; j--) { - if (lookup_table->entries[j].us_vdd < - lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; - } - } - } - - return 0; -} - -static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr) -{ - int result = 0; - int tmp_result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr, - table_info->vddc_lookup_table, &(data->vddc_leakage)); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, - &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_calc_voltage_dependency_tables(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); - if(tmp_result) - result = tmp_result; - - return result; -} - -static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, - "VDD dependency on SCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, - "VDD dependency on SCLK table has to have is missing. \ - This table is mandatory", return -EINVAL); - - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, - "VDD dependency on MCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
\ - This table is mandatory", return -EINVAL); - - data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc; - data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table-> - entries[allowed_sclk_vdd_table->count - 1].vddc; - - table_info->max_clock_voltage_on_ac.sclk = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.mclk = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.vddc = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - table_info->max_clock_voltage_on_ac.vddci = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = - table_info->max_clock_voltage_on_ac.sclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = - table_info->max_clock_voltage_on_ac.mclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = - table_info->max_clock_voltage_on_ac.vddc; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = - table_info->max_clock_voltage_on_ac.vddci; - - return 0; -} - -static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speedCntl = 0; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speedCntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -/** Patch the Boot State to match VBIOS boot clocks and voltage. -* -* @param hwmgr Pointer to the hardware manager. -* @param pPowerState The address of the PowerState instance being created. -* -*/ -static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. 
*/ - data->vbios_boot_state.sclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = - le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = - fiji_get_current_pcie_speed(hwmgr); - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)fiji_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data; - uint32_t i; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - bool stay_in_boot; - int result; - - data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; - - data->dll_default_on = false; - data->sram_end = SMC_RAM_END; - - for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) - data->activity_target[i] = FIJI_AT_DFLT; - - data->vddc_vddci_delta = VDDC_VDDCI_DELTA; - - data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT; - data->mclk_dpm0_activity_target = 0xa; - - data->sclk_dpm_key_disabled = 0; - data->mclk_dpm_key_disabled = 0; - data->pcie_dpm_key_disabled = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - data->gpio_debug = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - /* need to set voltage control types before EVV patching */ - data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE; - data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; - data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; - - data->force_pcie_gen = PP_PCIEGenInvalid; - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) - data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) - data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO; - - if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; - } - - if (data->vddci_control == 
FIJI_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - - if (table_info && table_info->cac_dtp_table->usClockStretchAmount) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - - fiji_init_dpm_defaults(hwmgr); - - /* Get leakage voltage based on leakage ID. */ - fiji_get_evv_voltages(hwmgr); - - /* Patch our voltage dependency table with actual leakage voltage - * We need to perform leakage translation before it's used by other functions - */ - fiji_complete_dependency_tables(hwmgr); - - /* Parse pptable data read from VBIOS */ - fiji_set_private_data_based_on_pptable(hwmgr); - - /* ULV Support */ - data->ulv.ulv_supported = true; /* ULV feature is enabled by default */ - - /* Initalize Dynamic State Adjustment Rule Settings */ - result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - - if (!result) { - data->uvd_enabled = false; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - data->vddc_phase_shed_control = false; - } - - stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StayInBootState); - - if (0 == result) { - struct cgs_system_info sys_info = {0}; - - data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - FIJI_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM); - - if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp && - hwmgr->thermal_controller. - advanceFanControlParameters.ucFanControlMode) { - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = - table_info->cac_dtp_table->usOperatingTempMinLimit; - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = - table_info->cac_dtp_table->usOperatingTempMaxLimit; - hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = - table_info->cac_dtp_table->usDefaultTargetOperatingTemp; - hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = - table_info->cac_dtp_table->usOperatingTempStep; - hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = - table_info->cac_dtp_table->usTargetOperatingTemp; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport); - } - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - } else { - /* Ignore return value in here, we are cleaning up a mess. 
*/ - fiji_hwmgr_backend_fini(hwmgr); - } - - return 0; -} - -/** - * Read clock related registers. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - data->clock_registers.vCG_SPLL_FUNC_CNTL = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL); - data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL_2); - data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL_3); - data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL_4); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_SPREAD_SPECTRUM); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_SPREAD_SPECTRUM_2); - - return 0; -} - -/** - * Find out if memory is GDDR5. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int fiji_get_memory_type(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); - - data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); - - return 0; -} - -/** - * Enables Dynamic Power Management by SMC - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, STATIC_PM_EN, 1); - - return 0; -} - -/** - * Initialize PowerGating States for different engines - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @return always 0 - */ -static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - data->acp_power_gated = false; - data->pg_acp_init = true; - - return 0; -} - -static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - -static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = fiji_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = fiji_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = fiji_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = fiji_init_power_gate_state(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init power gate state!", result = tmp_result); - - tmp_result = tonga_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = fiji_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -/** -* Checks if we want to support voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -*/ -static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr) -{ - const struct fiji_hwmgr *data = - (const struct fiji_hwmgr *)(hwmgr->backend); - - return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control); -} - -/** -* Enable voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -/** -* Remove repeated voltage values and create table with unique values. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @param vol_table the pointer to changing voltage table -* @return 0 in success -*/ - -static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr, - struct pp_atomctrl_voltage_table *vol_table) -{ - uint32_t i, j; - uint16_t vvalue; - bool found = false; - struct pp_atomctrl_voltage_table *table; - - PP_ASSERT_WITH_CODE((NULL != vol_table), - "Voltage Table empty.", return -EINVAL); - table = kzalloc(sizeof(struct pp_atomctrl_voltage_table), - GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - table->mask_low = vol_table->mask_low; - table->phase_delay = vol_table->phase_delay; - - for (i = 0; i < vol_table->count; i++) { - vvalue = vol_table->entries[i].value; - found = false; - - for (j = 0; j < table->count; j++) { - if (vvalue == table->entries[j].value) { - found = true; - break; - } - } - - if (!found) { - table->entries[table->count].value = vvalue; - table->entries[table->count].smio_low = - vol_table->entries[i].smio_low; - table->count++; - } - } - - memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); - kfree(table); - - return 0; -} - -static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint32_t i; - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table); - - PP_ASSERT_WITH_CODE((0 != dep_table->count), - "Voltage Dependency Table empty.", return -EINVAL); - - vol_table->mask_low = 0; - vol_table->phase_delay = 0; - vol_table->count = dep_table->count; - - for (i = 0; i < dep_table->count; i++) { - vol_table->entries[i].value = dep_table->entries[i].mvdd; - vol_table->entries[i].smio_low = 0; - } - - result = fiji_trim_voltage_table(hwmgr, vol_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to trim MVDD table.", return result); - - return 0; -} - -static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint32_t i; - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table); - - PP_ASSERT_WITH_CODE((0 != dep_table->count), - "Voltage Dependency Table empty.", return -EINVAL); - - vol_table->mask_low = 0; - vol_table->phase_delay = 0; - vol_table->count = dep_table->count; - - for (i = 0; i < dep_table->count; i++) { - vol_table->entries[i].value = dep_table->entries[i].vddci; - vol_table->entries[i].smio_low = 0; - } - - result = fiji_trim_voltage_table(hwmgr, vol_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to trim VDDCI table.", return result); - - return 0; -} - -static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - int i = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table); - - PP_ASSERT_WITH_CODE((0 != lookup_table->count), - "Voltage Lookup Table empty.", return -EINVAL); - - vol_table->mask_low = 0; - vol_table->phase_delay = 0; - - vol_table->count = lookup_table->count; - - for (i = 0; i < vol_table->count; i++) { - vol_table->entries[i].value = lookup_table->entries[i].us_vdd; - vol_table->entries[i].smio_low = 0; - } - - return 0; -} - -/* ---- Voltage Tables ---- - * If the voltage table would be bigger than - * what will fit into the state table on - * the SMC keep only the higher entries. 
- */ -static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr, - uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table) -{ - unsigned int i, diff; - - if (vol_table->count <= max_vol_steps) - return; - - diff = vol_table->count - max_vol_steps; - - for (i = 0; i < max_vol_steps; i++) - vol_table->entries[i] = vol_table->entries[i + diff]; - - vol_table->count = max_vol_steps; - - return; -} - -/** -* Create Voltage Tables. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - int result; - - if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, - &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve MVDD table.", - return result); - } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - result = fiji_get_svi2_mvdd_voltage_table(hwmgr, - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 MVDD table from dependancy table.", - return result;); - } - - if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, - &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", - return result); - } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - result = fiji_get_svi2_vddci_voltage_table(hwmgr, - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", - return result); - } - - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - result = fiji_get_svi2_vdd_voltage_table(hwmgr, - table_info->vddc_lookup_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from lookup table.", - return result); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - fiji_trim_voltage_table_to_fit_state_table(hwmgr, - SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - fiji_trim_voltage_table_to_fit_state_table(hwmgr, - SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD. 
Trimming to fit state table.", - fiji_trim_voltage_table_to_fit_state_table(hwmgr, - SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table))); - - return 0; -} - -static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - /* Program additional LP registers - * that are no longer programmed by VBIOS - */ - cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); - - return 0; -} - -/** -* Programs static screed detection parameters -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_program_static_screen_threshold_parameters( - struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** -* Setup display gap for glitch free memory clock switching. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t displayGap = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL); - - displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL, - DISP_GAP, DISPLAY_GAP_IGNORE); - - displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL, - DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, displayGap); - - return 0; -} - -/** -* Programs activity state transition voting clients -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - -static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr) -{ - /* Reset voting clients before disabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, 0); - - return 0; -} - -/** -* Get the location of various tables inside the FW image. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); - uint32_t tmp; - int result; - bool error = false; - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) - data->dpm_table_start = tmp; - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (!result) { - data->soft_regs_start = tmp; - smu_data->soft_regs_start = tmp; - } - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (!result) - data->mc_reg_table_start = tmp; - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (!result) - data->fan_table_start = tmp; - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (!result) - data->arb_table_start = tmp; - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, Version), - &tmp, data->sram_end); - - if (!result) - hwmgr->microcode_version_info.SMC = tmp; - - error |= (0 != result); - - return error ? -1 : 0; -} - -/* Copy one arb setting to another and then switch the active set. - * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
- */ -static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arb_src, uint32_t arb_dest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arb_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - default: - return -EINVAL; - } - - switch (arb_dest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - default: - return -EINVAL; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); - - return 0; -} - -/** -* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value -* -* @param hwmgr the address of the powerplay hardware manager. -* @return if success then 0; -*/ -static int fiji_reset_to_default(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); -} - -/** -* Initial switch from ARB F0->F1 -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -* This function is to be called from the SetPowerState table. 
-*/ -static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) -{ - return fiji_copy_and_switch_arb_sets(hwmgr, - MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) -{ - uint32_t tmp; - - tmp = (cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixSMC_SCRATCH9) & - 0x0000ff00) >> 8; - - if (tmp == MC_CG_ARB_FREQ_F0) - return 0; - - return fiji_copy_and_switch_arb_sets(hwmgr, - tmp, MC_CG_ARB_FREQ_F0); -} - -static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr, - struct fiji_single_dpm_table *dpm_table, uint32_t count) -{ - int i; - PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER, - "Fatal error, can not set up single DPM table entries " - "to exceed max number!",); - - dpm_table->count = count; - for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) - dpm_table->dpm_levels[i].enabled = false; - - return 0; -} - -static void fiji_setup_pcie_table_entry( - struct fiji_single_dpm_table *dpm_table, - uint32_t index, uint32_t pcie_gen, - uint32_t pcie_lanes) -{ - dpm_table->dpm_levels[index].value = pcie_gen; - dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = true; -} - -static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint32_t i, max_entry; - - PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || - data->use_pcie_power_saving_levels), "No pcie performance levels!", - return -EINVAL); - - if (data->use_pcie_performance_levels && - !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && - data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - fiji_reset_single_dpm_table(hwmgr, - &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK); - - if (pcie_table != NULL) { - /* max_entry is used to make sure we reserve one PCIE level - * for boot level (fix for A+A PSPP issue). - * If PCIE table from PPTable have ULV entry + 8 entries, - * then ignore the last entry.*/ - max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ? 
- SMU73_MAX_LEVELS_LINK : pcie_table->count; - for (i = 1; i < max_entry; i++) { - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, - get_pcie_gen_support(data->pcie_gen_cap, - pcie_table->entries[i].gen_speed), - get_pcie_lane_support(data->pcie_lane_cap, - pcie_table->entries[i].lane_width)); - } - data->dpm_table.pcie_speed_table.count = max_entry - 1; - } else { - /* Hardcode Pcie Table */ - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - data->dpm_table.pcie_speed_table.count = 6; - } - /* Populate last level for boot PCIE level, but do not increment count. */ - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, - data->dpm_table.pcie_speed_table.count, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - return 0; -} - -/* - * This function is to initalize all DPM state tables - * for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these - * state tables to the allowed range based - * on the power policy or external client requests, - * such as UVD request, etc. - */ -static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, - "SCLK dependency table has to have is missing. " - "This table is mandatory", - return -EINVAL); - - PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, - "MCLK dependency table has to have is missing. 
" - "This table is mandatory", - return -EINVAL); - - /* clear the state table to reset everything to default */ - fiji_reset_single_dpm_table(hwmgr, - &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS); - fiji_reset_single_dpm_table(hwmgr, - &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY); - - /* Initialize Sclk DPM table based on allow Sclk values */ - data->dpm_table.sclk_table.count = 0; - for (i = 0; i < dep_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels - [data->dpm_table.sclk_table.count - 1].value != - dep_sclk_table->entries[i].clk) { - data->dpm_table.sclk_table.dpm_levels - [data->dpm_table.sclk_table.count].value = - dep_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels - [data->dpm_table.sclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.sclk_table.count++; - } - } - - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i=0; icount; i++) { - if ( i==0 || data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count - 1].value != - dep_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count].value = - dep_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.mclk_table.count++; - } - } - - /* setup PCIE gen speed levels */ - fiji_setup_default_pcie_table(hwmgr); - - /* save a copy of the default DPM table */ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), - sizeof(struct fiji_dpm_table)); - - return 0; -} - -/** - * @brief PhwFiji_GetVoltageOrder - * Returns index of requested voltage record in lookup(table) - * @param lookup_table - lookup list to search in - * @param voltage - voltage to look for - * @return 0 on success - */ -static uint8_t fiji_get_voltage_index( - struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage) -{ - uint8_t count = (uint8_t) (lookup_table->count); - uint8_t i; - - PP_ASSERT_WITH_CODE((NULL != lookup_table), - "Lookup Table empty.", return 0); - PP_ASSERT_WITH_CODE((0 != count), - "Lookup Table empty.", return 0); - - for (i = 0; i < lookup_table->count; i++) { - /* find first voltage equal or bigger than requested */ - if (lookup_table->entries[i].us_vdd >= voltage) - return i; - } - /* voltage is bigger than max voltage in the table */ - return i - 1; -} - -/** -* Preparation of vddc and vddgfx CAC tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - uint32_t count; - uint8_t index; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_voltage_lookup_table *lookup_table = - table_info->vddc_lookup_table; - /* tables is already swapped, so in order to use the value from it, - * we need to swap it back. 
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
- for( count = 0; count<lookup_table->count; count++) {
- index = fiji_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_low *
- VOLTAGE_SCALE)) / 25);
- table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_high *
- VOLTAGE_SCALE)) / 25);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 );
-
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int32_t fiji_get_dpm_level_enable_mask_value(
- struct fiji_single_dpm_table* dpm_table)
-{
- int32_t i;
- int32_t mask = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask = mask << 1;
- if (dpm_table->dpm_levels[i - 1].enabled)
- mask |= 0x1;
- else
- mask &= 0xFFFFFFFE;
- }
- return mask;
-}
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level.
*/
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.
*/ - ref_clock = atomctrl_get_reference_clock(hwmgr); - ref_divider = 1 + dividers.uc_pll_ref_div; - - /* low 14 bits is fraction and high 12 bits is divider */ - fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; - - /* SPLL_FUNC_CNTL setup */ - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, - SPLL_REF_DIV, dividers.uc_pll_ref_div); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, - SPLL_PDIV_A, dividers.uc_pll_post_div); - - /* SPLL_FUNC_CNTL_3 setup*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, - SPLL_FB_DIV, fbdiv); - - /* set to use fractional accumulation*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, - SPLL_DITHEN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { - struct pp_atomctrl_internal_ss_info ssInfo; - - uint32_t vco_freq = clock * dividers.uc_pll_post_div; - if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, - vco_freq, &ssInfo)) { - /* - * ss_info.speed_spectrum_percentage -- in unit of 0.01% - * ss_info.speed_spectrum_rate -- in unit of khz - * - * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 - */ - uint32_t clk_s = ref_clock * 5 / - (ref_divider * ssInfo.speed_spectrum_rate); - /* clkv = 2 * D * fbdiv / NS */ - uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * - fbdiv / (clk_s * 10000); - - cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, - CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); - cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, - CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); - cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, - CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); - } - } - - sclk->SclkFrequency = clock; - sclk->CgSpllFuncCntl3 = spll_func_cntl_3; - sclk->CgSpllFuncCntl4 = spll_func_cntl_4; - sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; - sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; - sclk->SclkDid = (uint8_t)dividers.pll_post_divider; - - return 0; -} - -static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) -{ - uint32_t i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vddci_table = - &(data->vddci_voltage_table); - - for (i = 0; i < vddci_table->count; i++) { - if (vddci_table->entries[i].value >= vddci) - return vddci_table->entries[i].value; - } - - PP_ASSERT_WITH_CODE(false, - "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i-1].value); -} - -static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_clock_voltage_dependency_table* dep_table, - uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) -{ - uint32_t i; - uint16_t vddci; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - *voltage = *mvdd = 0; - - /* clock - voltage dependency table is empty table */ - if (dep_table->count == 0) - return -EINVAL; - - for (i = 0; i < dep_table->count; i++) { - /* find first sclk bigger than request */ - if (dep_table->entries[i].clk >= clock) { - *voltage |= (dep_table->entries[i].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i].vddci) - *voltage |= (dep_table->entries[i].vddci * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else { - vddci = fiji_find_closest_vddci(hwmgr, - 
(dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i].mvdd * - VOLTAGE_SCALE; - - *voltage |= 1 << PHASES_SHIFT; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - - if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i-1].vddci) { - vddci = fiji_find_closest_vddci(hwmgr, - (dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; - - return 0; -} - -static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock, - uint32_t clock_insr) -{ - uint8_t i; - uint32_t temp; - uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK); - - PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); - for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { - temp = clock >> i; - - if (temp >= min || i == 0) - break; - } - return i; -} -/** -* Populates single SMC SCLK structure using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ - -static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, - uint32_t clock, uint16_t sclk_al_threshold, - struct SMU73_Discrete_GraphicsLevel *level) -{ - int result; - /* PP_Clocks minClocks; */ - uint32_t threshold, mvdd; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - result = fiji_calculate_sclk_params(hwmgr, clock, level); - - /* populate graphics levels */ - result = fiji_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, clock, - &level->MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for " - "VDDC engine clock dependency table", - return result); - - level->SclkFrequency = clock; - level->ActivityLevel = sclk_al_threshold; - level->CcPwrDynRm = 0; - level->CcPwrDynRm1 = 0; - level->EnabledForActivity = 0; - level->EnabledForThrottle = 1; - level->UpHyst = 10; - level->DownHyst = 0; - level->VoltageDownHyst = 0; - level->PowerThrottle = 0; - - threshold = clock * data->fast_watermark_threshold / 100; - - - data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) - level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock, - hwmgr->display_config.min_core_set_clock_in_sr); - - - /* Default to slow, highest DPM level will be - * set to PPSMC_DISPLAY_WATERMARK_LOW later. 
- */ - level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); - CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); - - return 0; -} -/** -* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; - int result = 0; - uint32_t array = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * - SMU73_MAX_LEVELS_GRAPHICS; - struct SMU73_Discrete_GraphicsLevel *levels = - data->smc_state_table.GraphicsLevel; - uint32_t i, max_entry; - uint8_t hightest_pcie_level_enabled = 0, - lowest_pcie_level_enabled = 0, - mid_pcie_level_enabled = 0, - count = 0; - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - result = fiji_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &levels[i]); - if (result) - return result; - - /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ - if (i > 1) - levels[i].DeepSleepDivId = 0; - } - - /* Only enable level 0 for now.*/ - levels[0].EnabledForActivity = 1; - - /* set highest level watermark to high */ - levels[dpm_table->sclk_table.count - 1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - if (pcie_table != NULL) { - PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), - "There must be 1 or more PCIE levels defined in PPTable.", - return -EINVAL); - max_entry = pcie_entry_cnt - 1; - for (i = 0; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = - (uint8_t) ((i < max_entry)? i : max_entry); - } else { - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (hightest_pcie_level_enabled + 1))) != 0 )) - hightest_pcie_level_enabled++; - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << lowest_pcie_level_enabled)) == 0 )) - lowest_pcie_level_enabled++; - - while ((count < hightest_pcie_level_enabled) && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 )) - count++; - - mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) < - hightest_pcie_level_enabled? 
- (lowest_pcie_level_enabled + 1 + count) : - hightest_pcie_level_enabled; - - /* set pcieDpmLevel to hightest_pcie_level_enabled */ - for(i = 2; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = hightest_pcie_level_enabled; - - /* set pcieDpmLevel to lowest_pcie_level_enabled */ - levels[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled */ - levels[1].pcieDpmLevel = mid_pcie_level_enabled; - } - /* level count will send to smc once at init smc table and never change */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -/** - * MCLK Frequency Ratio - * SEQ_CG_RESP Bit[31:24] - 0x0 - * Bit[27:24] \96 DDR3 Frequency ratio - * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz - * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz - * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz - * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz - * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz - * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz - * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz - * 400 < 0x7 <= 450MHz, 800 < 0xF - */ -static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) -{ - if (mem_clock <= 10000) return 0x0; - if (mem_clock <= 15000) return 0x1; - if (mem_clock <= 20000) return 0x2; - if (mem_clock <= 25000) return 0x3; - if (mem_clock <= 30000) return 0x4; - if (mem_clock <= 35000) return 0x5; - if (mem_clock <= 40000) return 0x6; - if (mem_clock <= 45000) return 0x7; - if (mem_clock <= 50000) return 0x8; - if (mem_clock <= 55000) return 0x9; - if (mem_clock <= 60000) return 0xa; - if (mem_clock <= 65000) return 0xb; - if (mem_clock <= 70000) return 0xc; - if (mem_clock <= 75000) return 0xd; - if (mem_clock <= 80000) return 0xe; - /* mem_clock > 800MHz */ - return 0xf; -} - -/** -* Populates the SMC MCLK structure using the provided memory clock -* -* @param hwmgr the address of the hardware manager -* @param clock the memory clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ -static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) -{ - struct pp_atomctrl_memory_clock_param mem_param; - int result; - - result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to get Memory PLL Dividers.",); - - /* Save the result data to outpupt memory level structure */ - mclk->MclkFrequency = clock; - mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; - mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); - - return result; -} - -static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - - if (table_info->vdd_dep_on_mclk) { - result = fiji_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, clock, - &mem_level->MinVoltage, &mem_level->MinMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory " - "VDDC voltage dependency table", return result); - } - - mem_level->EnabledForThrottle = 1; - mem_level->EnabledForActivity = 0; - mem_level->UpHyst = 0; - mem_level->DownHyst = 100; - mem_level->VoltageDownHyst = 0; - mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - mem_level->StutterEnable = false; 
- - mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - /* enable stutter mode if all the follow condition applied - * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, - * &(data->DisplayTiming.numExistingDisplays)); - */ - data->display_timing.num_existing_displays = 1; - - if ((data->mclk_stutter_mode_threshold) && - (clock <= data->mclk_stutter_mode_threshold) && - (!data->is_uvd_enabled) && - (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, - STUTTER_ENABLE) & 0x1)) - mem_level->StutterEnable = true; - - result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); - if (!result) { - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); - } - return result; -} - -/** -* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t array = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, MemoryLevel); - uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * - SMU73_MAX_LEVELS_MEMORY; - struct SMU73_Discrete_MemoryLevel *levels = - data->smc_state_table.MemoryLevel; - uint32_t i; - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", - return -EINVAL); - result = fiji_populate_single_memory_level(hwmgr, - dpm_table->mclk_table.dpm_levels[i].value, - &levels[i]); - if (result) - return result; - } - - /* Only enable level 0 for now. */ - levels[0].EnabledForActivity = 1; - - /* in order to prevent MC activity from stutter mode to push DPM up. - * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not effected by - * up threshold or and MCLK DPM latency. - */ - levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; - CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = - (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - /* set highest level watermark to high */ - levels[dpm_table->mclk_table.count - 1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - /* level count will send to smc once at init smc table and never change */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -/** -* Populates the SMC MVDD structure using the provided memory clock. -* -* @param hwmgr the address of the hardware manager -* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
-* @param voltage the SMC VOLTAGE structure to be populated
-*/
-static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",);
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",);
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!fiji_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for(count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /*retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
- ULONG state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- data->sram_end);
- return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
- }
- return result;
-}
-
-static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
- uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL;
- uint32_t i;
-
- for (i = 0; i < table->count; i++) {
- if (value ==
table->dpm_levels[i].value) { - *boot_level = i; - result = 0; - } - } - return result; -} - -static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - int result = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - table->GraphicsBootLevel = 0; - table->MemoryBootLevel = 0; - - /* find boot level from dpm table */ - result = fiji_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(table->GraphicsBootLevel)); - - result = fiji_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(table->MemoryBootLevel)); - - table->BootVddc = data->vbios_boot_state.vddc_bootup_value * - VOLTAGE_SCALE; - table->BootVddci = data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE; - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); - CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); - - return 0; -} - -static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint8_t count, level; - - count = (uint8_t)(table_info->vdd_dep_on_sclk->count); - for (level = 0; level < count; level++) { - if(table_info->vdd_dep_on_sclk->entries[level].clk >= - data->vbios_boot_state.sclk_bootup_value) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t)(table_info->vdd_dep_on_mclk->count); - for (level = 0; level < count; level++) { - if(table_info->vdd_dep_on_mclk->entries[level].clk >= - data->vbios_boot_state.mclk_bootup_value) { - data->smc_state_table.MemoryBootLevel = level; - break; - } - } - - return 0; -} - -static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) -{ - uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, - volt_with_cks, value; - uint16_t clock_freq_u16; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, - volt_offset = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - - stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; - - /* Read SMU_Eefuse to read and calculate RO and determine - * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
- */ - efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (146 * 4)); - efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (148 * 4)); - efuse &= 0xFF000000; - efuse = efuse >> 24; - efuse2 &= 0xF; - - if (efuse2 == 1) - ro = (2300 - 1350) * efuse / 255 + 1350; - else - ro = (2500 - 1000) * efuse / 255 + 1000; - - if (ro >= 1660) - type = 0; - else - type = 1; - - /* Populate Stretch amount */ - data->smc_state_table.ClockStretcherAmount = stretch_amount; - - /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ - for (i = 0; i < sclk_table->count; i++) { - data->smc_state_table.Sclk_CKS_masterEn0_7 |= - sclk_table->entries[i].cks_enable << i; - volt_without_cks = (uint32_t)((14041 * - (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / - (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); - volt_with_cks = (uint32_t)((13946 * - (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / - (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); - if (volt_without_cks >= volt_with_cks) - volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + - sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); - data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; - } - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - STRETCH_ENABLE, 0x0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - staticEnable, 0x1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x0); - - /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - PP_ASSERT_WITH_CODE(false, - "Stretch Amount in PPTable not supported\n", - return -EINVAL); - } - - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL); - value &= 0xFFC2FF87; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = - fiji_clock_stretcher_lookup_table[stretch_amount2][0]; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = - fiji_clock_stretcher_lookup_table[stretch_amount2][1]; - clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. - GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1]. - SclkFrequency) / 100); - if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < - clock_freq_u16 && - fiji_clock_stretcher_lookup_table[stretch_amount2][1] > - clock_freq_u16) { - /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ - value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; - /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ - value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; - /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ - value |= (fiji_clock_stretch_amount_conversion - [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] - [stretch_amount]) << 3; - } - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable. - CKS_LOOKUPTableEntry[0].minFreq); - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable. 
- CKS_LOOKUPTableEntry[0].maxFreq); - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = - fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= - (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL, value); - - /* Populate DDT Lookup Table */ - for (i = 0; i < 4; i++) { - /* Assign the minimum and maximum VID stored - * in the last row of Clock Stretcher Voltage Table. - */ - data->smc_state_table.ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].minVID = - (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; - data->smc_state_table.ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].maxVID = - (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; - /* Loop through each SCLK and check the frequency - * to see if it lies within the frequency for clock stretcher. - */ - for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { - cks_setting = 0; - clock_freq = PP_SMC_TO_HOST_UL( - data->smc_state_table.GraphicsLevel[j].SclkFrequency); - /* Check the allowed frequency against the sclk level[j]. - * Sclk's endianness has already been converted, - * and it's in 10Khz unit, - * as opposed to Data table, which is in Mhz unit. - */ - if (clock_freq >= - (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { - cks_setting |= 0x2; - if (clock_freq < - (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) - cks_setting |= 0x1; - } - data->smc_state_table.ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); - } - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table. - ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].setting); - } - - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); - value &= 0xFFFFFFFE; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); - - return 0; -} - -/** -* Populates the SMC VRConfig field in DPM table. 
-* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint16_t config; - - config = VR_MERGED_WITH_VDDC; - table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); - - /* Set Vddc Voltage Controller */ - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_1; - table->VRConfig |= config; - } else { - PP_ASSERT_WITH_CODE(false, - "VDDC should be on SVI2 control in merged mode!",); - } - /* Set Vddci Voltage Controller */ - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - config = VR_SVI2_PLANE_2; /* only in merged mode */ - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - config = VR_SMIO_PATTERN_1; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } - /* Set Mvdd Voltage Controller */ - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - config = VR_SVI2_PLANE_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } - - return 0; -} - -/** -* Initializes the SMC table and uploads it -* -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data (PowerState) -* @return always 0 -*/ -static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table); - const struct fiji_ulv_parm *ulv = &(data->ulv); - uint8_t i; - struct pp_atomctrl_gpio_pin_assignment gpio_pin; - - result = fiji_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result); - - if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control) - fiji_populate_smc_voltage_tables(hwmgr, table); - - table->SystemFlags = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - - if (data->is_memory_gddr5) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - - if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) { - result = fiji_populate_ulv_state(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter); - } - - result = fiji_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result); - - result = fiji_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result); - - result = fiji_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to 
initialize Memory Level!", return result); - - result = fiji_populate_smc_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result); - - result = fiji_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result); - - result = fiji_populate_smc_acp_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACP Level!", return result); - - result = fiji_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - - /* Since only the initial state is completely set up at this point - * (the other states are just copies of the boot state) we only - * need to populate the ARB settings for the initial state. - */ - result = fiji_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result); - - result = fiji_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result); - - result = fiji_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot Level!", return result); - - result = fiji_populate_smc_initailial_state(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot State!", return result); - - result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate BAPM Parameters!", return result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - result = fiji_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate Clock Stretcher Data Table!", - return result); - } - - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - table_info->cac_dtp_table->usTargetOperatingTemp * - FIJI_Q88_FORMAT_CONVERSION_UNIT; - table->TemperatureLimitLow = - (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * - FIJI_Q88_FORMAT_CONVERSION_UNIT; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ - table->PCIeGenInterval = 1; - table->VRConfig = 0; - - result = fiji_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate VRConfig setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { - table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } else { - table->VRHotGpio = FIJI_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin)) { - table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = FIJI_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - /* Thermal Output GPIO 
*/ - if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, - &gpio_pin)) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; - - /* For porlarity read GPIOPAD_A with assigned Gpio pin - * since VBIOS will program this register to set 'inactive state', - * driver can then determine 'active state' from this and - * program SMU with correct polarity - */ - table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & - (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0; - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; - - /* if required, combine VRHot/PCC with thermal out GPIO */ - if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot) && - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CombinePCCWithThermalSignal)) - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; - } else { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - table->ThermOutGpio = 17; - table->ThermOutPolarity = 1; - table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; - } - - for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++) - table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); - - CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); - CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); - CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); - CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); - CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); - - /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, SystemFlags), - (uint8_t *)&(table->SystemFlags), - sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), - data->sram_end); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to upload dpm data to SMC memory!", return result); - - return 0; -} - -/** -* Initialize the ARB DRAM timing table's index field. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) -{ - const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t tmp; - int result; - - /* This is a read-modify-write on the first byte of the ARB table. - * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure - * is the field 'current'. - * This solution is ugly, but we never write the whole table only - * individual fields in it. - * In reality this field should not be in that structure - * but in a soft register. 
- */ - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, &tmp, data->sram_end); - - if (result) - return result; - - tmp &= 0x00FFFFFF; - tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - - return fiji_write_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, tmp, data->sram_end); -} - -static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) -{ - if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableVRHotGPIOInterrupt); - - return 0; -} - -static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - SCLK_PWRMGT_OFF, 0); - return 0; -} - -static int fiji_enable_ulv(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); - - return 0; -} - -static int fiji_disable_ulv(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); - - return 0; -} - -static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) - PP_ASSERT_WITH_CODE(false, - "Attempt to enable Master Deep Sleep switch failed!", - return -1); - } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t val, val0, val2; - uint32_t i, cpl_cntl, cpl_threshold, mc_threshold; - - /* enable SCLK dpm */ - if(!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - - /* enable MCLK dpm */ - if(0 == data->mclk_dpm_key_disabled) { - cpl_threshold = 0; - mc_threshold = 0; - - /* Read per MCD tile (0 - 7) */ - for (i = 0; i < 8; i++) { - PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i); - val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000; - if (0xf0000000 != val) { - /* count number of MCQ that has channel(s) enabled */ - cpl_threshold++; - /* only harvest 3 or full 4 supported */ - mc_threshold = val ? 
3 : 4; - } - } - PP_ASSERT_WITH_CODE(0 != cpl_threshold, - "Number of MCQ is zero!", return -EINVAL;); - - mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) << - LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) | - LCAC_MC0_CNTL__MC0_ENABLE_MASK; - cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) << - LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) | - LCAC_CPL_CNTL__CPL_ENABLE_MASK; - cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT)); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, mc_threshold); - if (8 == cpl_threshold) { - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC2_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC3_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC4_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC5_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC6_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC7_CNTL, mc_threshold); - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, cpl_cntl); - - udelay(5); - - mc_threshold = mc_threshold | - (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT); - cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, mc_threshold); - if (8 == cpl_threshold) { - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC2_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC3_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC4_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC5_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC6_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC7_CNTL, mc_threshold); - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, cpl_cntl); - - /* Program CAC_EN per MCD (0-7) Tile */ - val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD); - val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK | - MC_CONFIG_MCD__MC_RD_ENABLE_MASK); - - for (i = 0; i < 8; i++) { - /* Enable MCD i Tile read & write */ - val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) | - (1 << i)); - cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2); - /* Enbale CAC_ON MCD i Tile */ - val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL); - val2 |= MC_SEQ_CNTL__CAC_EN_MASK; - cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2); - } - /* Set MC_CONFIG_MCD back to its default setting val0 */ - cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - } - return 0; -} - -static int fiji_start_dpm(struct pp_hwmgr 
*hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /*enable general power management */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 1); - /* enable sclk deep sleep */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 1); - /* prepare for PCIE DPM */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + offsetof(SMU73_SoftRegisters, - VoltageChangeTimeout), 0x1000); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - SWRST_COMMAND_1, RESETLC, 0x0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); - - if (fiji_enable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); - return -1; - } - - /* enable PCIE dpm */ - if(!data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start Function!", - return -1); - } - - return 0; -} - -static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* disable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable) == 0), - "Failed to disable SCLK DPM!", - return -1); - - /* disable MCLK dpm */ - if (!data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0), - "Failed to force MCLK DPM0!", - return -1); - - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable) == 0), - "Failed to disable MCLK DPM!", - return -1); - } - - return 0; -} - -static int fiji_stop_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* disable general power management */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 0); - /* disable sclk deep sleep */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 0); - - /* disable PCIE dpm */ - if (!data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Disable) == 0), - "Failed to disable pcie DPM during DPM Stop Function!", - return -1); - } - - if (fiji_disable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); - return -1; - } - - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Disable) == 0), - "Failed to disable voltage DPM during DPM Stop Function!", - return -1); - - return 0; -} - -static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, - uint32_t sources) -{ - bool protection; - enum DPM_EVENT_SRC src; - - switch (sources) { - default: - printk(KERN_ERR "Unknown throttling event sources."); - /* fall through */ - case 0: - protection = false; - /* src is unused */ - break; - case (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL; - break; - case (1 << PHM_AutoThrottleSource_External): - protection = true; - src = DPM_EVENT_SRC_EXTERNAL; - break; - case (1 << PHM_AutoThrottleSource_External) | - (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; - 
break; - } - /* Order matters - don't enable thermal protection for the wrong source. */ - if (protection) { - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, - DPM_EVENT_SRC, src); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, - !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)); - } else - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, 1); -} - -static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!(data->active_auto_throttle_sources & (1 << source))) { - data->active_auto_throttle_sources |= 1 << source; - fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->active_auto_throttle_sources & (1 << source)) { - data->active_auto_throttle_sources &= ~(1 << source); - fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = (!fiji_is_dpm_running(hwmgr))? 0 : -1; - PP_ASSERT_WITH_CODE(result == 0, - "DPM is already running right now, no need to enable DPM!", - return 0); - - if (fiji_voltage_control(hwmgr)) { - tmp_result = fiji_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE(tmp_result == 0, - "Failed to enable voltage control!", - result = tmp_result); - } - - if (fiji_voltage_control(hwmgr)) { - tmp_result = fiji_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", - result = tmp_result); - } - - tmp_result = fiji_initialize_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize MC reg table!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); - - tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", - result = tmp_result); - - tmp_result = fiji_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", result = tmp_result); - - tmp_result = fiji_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", result = tmp_result); - - tmp_result = fiji_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", result = tmp_result); - - tmp_result = 
fiji_initial_switch_from_arbf0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", - result = tmp_result); - - tmp_result = fiji_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", result = tmp_result); - - tmp_result = fiji_init_arb_table_index(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize ARB table index!", result = tmp_result); - - tmp_result = fiji_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate PM fuses!", result = tmp_result); - - tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - - tmp_result = tonga_notify_smc_display_change(hwmgr, false); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify no display!", result = tmp_result); - - tmp_result = fiji_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", result = tmp_result); - - tmp_result = fiji_enable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ULV!", result = tmp_result); - - tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable deep sleep master switch!", result = tmp_result); - - tmp_result = fiji_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", result = tmp_result); - - tmp_result = fiji_enable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SMC CAC!", result = tmp_result); - - tmp_result = fiji_enable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable power containment!", result = tmp_result); - - tmp_result = fiji_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to power control set level!", result = tmp_result); - - tmp_result = fiji_enable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable thermal auto throttle!", result = tmp_result); - - return result; -} - -static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = (fiji_is_dpm_running(hwmgr)) ? 
0 : -1; - PP_ASSERT_WITH_CODE(tmp_result == 0, - "DPM is not running right now, no need to disable DPM!", - return 0); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); - - tmp_result = fiji_disable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable power containment!", result = tmp_result); - - tmp_result = fiji_disable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable SMC CAC!", result = tmp_result); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); - - tmp_result = fiji_disable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable thermal auto throttle!", result = tmp_result); - - tmp_result = fiji_stop_dpm(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to stop DPM!", result = tmp_result); - - tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable deep sleep master switch!", result = tmp_result); - - tmp_result = fiji_disable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable ULV!", result = tmp_result); - - tmp_result = fiji_clear_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to clear voting clients!", result = tmp_result); - - tmp_result = fiji_reset_to_default(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to reset to default!", result = tmp_result); - - tmp_result = fiji_force_switch_to_arbf0(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to force to switch arbf0!", result = tmp_result); - - return result; -} - -static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t level, tmp; - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++; - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - (1 << level)); - } - } - return 0; -} - -static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - phm_apply_dal_min_voltage_request(hwmgr); - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - } - return 0; -} - -static int fiji_unforce_dpm_levels(struct pp_hwmgr 
*hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!fiji_is_dpm_running(hwmgr)) - return -EINVAL; - - if (!data->pcie_dpm_key_disabled) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel); - } - - return fiji_upload_dpmlevel_enable_mask(hwmgr); -} - -static uint32_t fiji_get_lowest_enabled_level( - struct pp_hwmgr *hwmgr, uint32_t mask) -{ - uint32_t level = 0; - - while(0 == (mask & (1 << level))) - level++; - - return level; -} - -static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = - (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t level; - - if (!data->sclk_dpm_key_disabled) - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = fiji_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = fiji_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = fiji_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.pcie_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - (1 << level)); - } - } - - return 0; - -} -static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = fiji_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = fiji_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = fiji_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - - return ret; -} - -static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct fiji_power_state); -} - -static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - void *state, struct pp_power_state *power_state, - void *pp_table, uint32_t classification_flag) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_power_state *fiji_power_state = - (struct fiji_power_state *)(&(power_state->hardware)); - struct fiji_performance_level *performance_level; - ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; - ATOM_Tonga_POWERPLAYTABLE *powerplay_table = - (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (ATOM_Tonga_SCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); - ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = - (ATOM_Tonga_MCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - - /* The following fields are not initialized here: id orderedList allStatesList */ - power_state->classification.ui_label = - (le16_to_cpu(state_entry->usClassification) & - ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> - ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; - power_state->classification.flags = classification_flag; - /* NOTE: There is a classification2 flag in BIOS that is not being used right 
now */ - - power_state->classification.temporary_state = false; - power_state->classification.to_be_deleted = false; - - power_state->validation.disallowOnDC = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_DISALLOW_ON_DC)); - - power_state->pcie.lanes = 0; - - power_state->display.disableFrameModulation = false; - power_state->display.limitRefreshrate = false; - power_state->display.enableVariBright = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_ENABLE_VARIBRIGHT)); - - power_state->validation.supportedPowerLevels = 0; - power_state->uvd_clocks.VCLK = 0; - power_state->uvd_clocks.DCLK = 0; - power_state->temperatures.min = 0; - power_state->temperatures.max = 0; - - performance_level = &(fiji_power_state->performance_levels - [fiji_power_state->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (fiji_power_state->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - "Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged from low to high. */ - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexLow].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries - [state_entry->ucEngineClockIndexLow].ulSclk; - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenLow); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - performance_level = &(fiji_power_state->performance_levels - [fiji_power_state->performance_level_count++]); - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexHigh].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries - [state_entry->ucEngineClockIndexHigh].ulSclk; - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenHigh); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - return 0; -} - -static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *state) -{ - int result; - struct fiji_power_state *ps; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - state->hardware.magic = PHM_VIslands_Magic; - - ps = (struct fiji_power_state *)(&state->hardware); - - result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, - fiji_get_pp_table_entry_callback_func); - - /* This is the earliest time we have all the dependency table and the VBIOS boot state - * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state - * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].vddci != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does not 
match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!state->validation.disallowOnDC) - ps->dc_compatible = true; - - if (state->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; - - ps->uvd_clks.vclk = state->uvd_clocks.VCLK; - ps->uvd_clks.dclk = state->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (state->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_performance.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *request_ps, - const struct pp_power_state *current_ps) -{ - struct fiji_power_state *fiji_ps = - cast_phw_fiji_power_state(&request_ps->hardware); - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == - request_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2, - "VI should always have 2 performance levels",); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - /* Cap clock DPM tables at DC MAX if it is in DC. 
*/ - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < fiji_ps->performance_level_count; i++) { - if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk) - fiji_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk) - fiji_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; - fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; - - fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - - /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = table_info->vdd_dep_on_sclk->count - 1; - count >= 0; count--) { - if (stable_pstate_sclk >= - table_info->vdd_dep_on_sclk->entries[count].clk) { - stable_pstate_sclk = - table_info->vdd_dep_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - fiji_ps->performance_levels[1].engine_clock = - hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - fiji_ps->performance_levels[1].memory_clock = - hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = fiji_ps->performance_levels[0].engine_clock; - mclk = fiji_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? - max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? 
- max_limits->mclk : minimum_clocks.memoryClock; - - fiji_ps->performance_levels[0].engine_clock = sclk; - fiji_ps->performance_levels[0].memory_clock = mclk; - - fiji_ps->performance_levels[1].engine_clock = - (fiji_ps->performance_levels[1].engine_clock >= - fiji_ps->performance_levels[0].engine_clock) ? - fiji_ps->performance_levels[1].engine_clock : - fiji_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < fiji_ps->performance_levels[1].memory_clock) - mclk = fiji_ps->performance_levels[1].memory_clock; - - fiji_ps->performance_levels[0].memory_clock = mclk; - fiji_ps->performance_levels[1].memory_clock = mclk; - } else { - if (fiji_ps->performance_levels[1].memory_clock < - fiji_ps->performance_levels[0].memory_clock) - fiji_ps->performance_levels[1].memory_clock = - fiji_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - for (i = 0; i < fiji_ps->performance_level_count; i++) { - fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - - return 0; -} - -static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].engine_clock; - struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].memory_clock; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - if(data->display_timing.min_clock_in_sr != - hwmgr->display_config.min_core_set_clock_in_sr) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - - if (i >= mclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr, - const struct fiji_power_state *fiji_ps) -{ - uint32_t i; - uint32_t sclk, max_sclk = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - - for (i = 0; i < fiji_ps->performance_level_count; i++) { - sclk = fiji_ps->performance_levels[i].engine_clock; - if (max_sclk < sclk) - max_sclk = sclk; - } - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) - return (uint16_t) ((i >= 
dpm_table->pcie_speed_table.count) ? - dpm_table->pcie_speed_table.dpm_levels - [dpm_table->pcie_speed_table.count - 1].value : - dpm_table->pcie_speed_table.dpm_levels[i].value); - } - - return 0; -} - -static int fiji_request_link_speed_change_before_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_nps = - cast_const_phw_fiji_power_state(states->pnew_state); - const struct fiji_power_state *fiji_cps = - cast_const_phw_fiji_power_state(states->pcurrent_state); - - uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch(target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int fiji_populate_and_upload_sclk_mclk_dpm_levels( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t sclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].engine_clock; - uint32_t mclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].memory_clock; - struct fiji_dpm_table *dpm_table = &data->dpm_table; - - struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & 
DPMTABLE_OD_UPDATE_SCLK) { - dpm_table->sclk_table.dpm_levels - [dpm_table->sclk_table.count - 1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count - 1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->sclk_table.count < 2 ? - 0 : dpm_table->sclk_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (sclk > golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value) { - clock_percent = - ((sclk - golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value) * 100) / - golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value + - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent)/100; - - } else if (golden_dpm_table->sclk_table.dpm_levels - [dpm_table->sclk_table.count-1].value > sclk) { - clock_percent = - ((golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count - 1].value - sclk) * - 100) / - golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value - - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - dpm_table->mclk_table.dpm_levels - [dpm_table->mclk_table.count - 1].value = mclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->mclk_table.count < 2 ? 
- 0 : dpm_table->mclk_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (mclk > golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value) { - clock_percent = ((mclk - - golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value) * 100) / - golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value + - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - - } else if (golden_dpm_table->mclk_table.dpm_levels - [dpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ((golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) / - golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value - - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = fiji_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = fiji_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct fiji_single_dpm_table * dpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) || - (dpm_table->dpm_levels[i].value > high_limit)) - dpm_table->dpm_levels[i].enabled = false; - else - dpm_table->dpm_levels[i].enabled = true; - } - return 0; -} - -static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, - const struct fiji_power_state *fiji_ps) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == fiji_ps->performance_level_count) ? 
0 : 1; - - fiji_trim_single_dpm_states(hwmgr, - &(data->dpm_table.sclk_table), - fiji_ps->performance_levels[0].engine_clock, - fiji_ps->performance_levels[high_limit_count].engine_clock); - - fiji_trim_single_dpm_states(hwmgr, - &(data->dpm_table.mclk_table), - fiji_ps->performance_levels[0].memory_clock, - fiji_ps->performance_levels[high_limit_count].memory_clock); - - return 0; -} - -static int fiji_generate_dpm_level_enable_mask( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - - result = fiji_trim_dpm_states(hwmgr, fiji_ps); - if (result) - return result; - - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); - data->last_mclk_dpm_enable_mask = - data->dpm_level_enable_mask.mclk_dpm_enable_mask; - - if (data->uvd_enabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) - data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - } - - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); - - return 0; -} - -static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable : - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable); -} - -int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? - PPSMC_MSG_VCEDPM_Enable : - PPSMC_MSG_VCEDPM_Disable); -} - -static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? - PPSMC_MSG_SAMUDPM_Enable : - PPSMC_MSG_SAMUDPM_Disable); -} - -static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? 
- PPSMC_MSG_ACPDPM_Enable : - PPSMC_MSG_ACPDPM_Disable); -} - -int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.UvdBootLevel = 0; - if (table_info->mm_dep_table->count > 0) - data->smc_state_table.UvdBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0x00FFFFFF; - mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDPM) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); - } - - return fiji_enable_disable_uvd_dpm(hwmgr, !bgate); -} - -int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_nps = - cast_const_phw_fiji_power_state(states->pnew_state); - const struct fiji_power_state *fiji_cps = - cast_const_phw_fiji_power_state(states->pcurrent_state); - - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (fiji_nps->vce_clks.evclk >0 && - (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) { - data->smc_state_table.VceBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, VceBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFF00FFFF; - mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << data->smc_state_table.VceBootLevel); - - fiji_enable_disable_vce_dpm(hwmgr, true); - } else if (fiji_nps->vce_clks.evclk == 0 && - fiji_cps != NULL && - fiji_cps->vce_clks.evclk > 0) - fiji_enable_disable_vce_dpm(hwmgr, false); - } - - return 0; -} - -int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.SamuBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = 
data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.SamuBootLevel)); - } - - return fiji_enable_disable_samu_dpm(hwmgr, !bgate); -} - -int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.AcpBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, AcpBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFF00FF; - mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_ACPDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.AcpBootLevel)); - } - - return fiji_enable_disable_acp_dpm(hwmgr, !bgate); -} - -static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != - data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = - hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = - data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = fiji_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end); - } - - return result; -} - -static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return fiji_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - 
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -/* Look up the voltaged based on DAL's requested level. - * and then send the requested VDDC voltage to SMC - */ -static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr) -{ - return; -} - -static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Apply minimum voltage based on DAL's request level */ - fiji_apply_dal_minimum_voltage_request(hwmgr); - - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, - * we should skip this message. - */ - if (!fiji_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] " - "Trying to set Enable Mask when DPM is disabled \n"); - - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Sclk Dpm enable Mask failed", return -1); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, - * we should skip this message. 
- */ - if (!fiji_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ]" - " Trying to set Enable Mask when DPM is disabled \n"); - - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Mclk Dpm enable Mask failed", return -1); - } - } - - return 0; -} - -static int fiji_notify_link_speed_change_after_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps); - uint8_t request; - - if (data->pspp_notify_required) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if(request == PCIE_PERF_REQ_GEN1 && - fiji_get_current_pcie_speed(hwmgr) > 0) - return 0; - - if (acpi_pcie_perf_request(hwmgr->device, request, false)) { - if (PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - return 0; -} - -static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr, - const void *input) -{ - int tmp_result, result = 0; - - tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to find DPM states clocks in DPM table!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - fiji_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to request link speed change before state change!", - result = tmp_result); - } - - tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate and upload SCLK MCLK DPM levels!", - result = tmp_result); - - tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to generate DPM level enabled mask!", - result = tmp_result); - - tmp_result = fiji_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update VCE DPM!", - result = tmp_result); - - tmp_result = fiji_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update SCLK threshold!", - result = tmp_result); - - tmp_result = fiji_program_mem_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program memory timing parameters!", - result = tmp_result); - - tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to unfreeze SCLK MCLK DPM!", - result = tmp_result); - - tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to upload DPM level enabled mask!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - 
PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - fiji_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify link speed change after state change!", - result = tmp_result); - } - - return result; -} - -static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - if (low) - return fiji_ps->performance_levels[0].engine_clock; - else - return fiji_ps->performance_levels - [fiji_ps->performance_level_count-1].engine_clock; -} - -static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - if (low) - return fiji_ps->performance_levels[0].memory_clock; - else - return fiji_ps->performance_levels - [fiji_ps->performance_level_count-1].memory_clock; -} - -static void fiji_print_current_perforce_level( - struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent = 0; - uint32_t offset; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", - mclk / 100, sclk / 100); - - offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en"); -} - -static int fiji_program_display_gap(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, - DISP_GAP, (num_active_displays > 0)? 
- DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if (refresh_rate == 0) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + - offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + - offsetof(SMU73_SoftRegisters, VBlankTimeout), - (frame_time_in_us - pre_vbi_time_in_us)); - - if (num_active_displays == 1) - tonga_notify_smc_display_change(hwmgr, true); - - return 0; -} - -static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - return fiji_program_display_gap(hwmgr); -} - -static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, - uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); -} - -static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, - uint16_t us_max_fan_rpm) -{ - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); -} - -static int fiji_dpm_set_interrupt_state(void *private_data, - unsigned src_id, unsigned type, - int enabled) -{ - uint32_t cg_thermal_int; - struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; - - if (hwmgr == NULL) - return -EINVAL; - - switch (type) { - case AMD_THERMAL_IRQ_LOW_TO_HIGH: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - - case AMD_THERMAL_IRQ_HIGH_TO_LOW: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - default: - break; - } - return 0; -} - -static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - int result; - const struct pp_interrupt_registration_info *info = - (const struct pp_interrupt_registration_info *) - thermal_interrupt_info; - - if (info == 
NULL) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, - fiji_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, - fiji_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - return 0; -} - -static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - fiji_fan_ctrl_stop_smc_fan_control(hwmgr); - fiji_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - - case PP_PCIE: - { - uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? 
"*" : ""); - break; - case PP_PCIE: - pcie_speed = fiji_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? "*" : ""); - break; - default: - break; - } - return size; -} - -static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1, - const struct fiji_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -static int -fiji_check_states_equal(struct pp_hwmgr *hwmgr, - const struct pp_hw_power_state *pstate1, - const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1); - const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2); - int i; - - if (equal == NULL || psa == NULL || psb == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); - *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - *equal &= (psa->acp_clk == psb->acp_clk); - - return 0; -} - -static bool -fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0,0,NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr) - is_update_required = true; - } - - return is_update_required; -} - -static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct fiji_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct fiji_hwmgr *data = (struct 
fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct fiji_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} - -static const struct pp_hwmgr_func fiji_hwmgr_funcs = { - .backend_init = &fiji_hwmgr_backend_init, - .backend_fini = &fiji_hwmgr_backend_fini, - .asic_setup = &fiji_setup_asic_task, - .dynamic_state_management_enable = &fiji_enable_dpm_tasks, - .dynamic_state_management_disable = &fiji_disable_dpm_tasks, - .force_dpm_level = &fiji_dpm_force_dpm_level, - .get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0, - .get_power_state_size = &fiji_get_power_state_size, - .get_pp_table_entry = &fiji_get_pp_table_entry, - .patch_boot_state = &fiji_patch_boot_state, - .apply_state_adjust_rules = &fiji_apply_state_adjust_rules, - .power_state_set = &fiji_set_power_state_tasks, - .get_sclk = &fiji_dpm_get_sclk, - .get_mclk = &fiji_dpm_get_mclk, - .print_current_perforce_level = &fiji_print_current_perforce_level, - .powergate_uvd = &fiji_phm_powergate_uvd, - .powergate_vce = &fiji_phm_powergate_vce, - .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating, - .notify_smc_display_config_after_ps_adjustment = - &tonga_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = &fiji_display_configuration_changed_task, - .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output, - .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output, - .get_temperature = fiji_thermal_get_temperature, - .stop_thermal_controller = fiji_thermal_stop_thermal_controller, - .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = 
fiji_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, - .set_fan_control_mode = fiji_set_fan_control_mode, - .get_fan_control_mode = fiji_get_fan_control_mode, - .check_states_equal = fiji_check_states_equal, - .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration, - .force_clock_level = fiji_force_clock_level, - .print_clock_levels = fiji_print_clock_levels, - .get_sclk_od = fiji_get_sclk_od, - .set_sclk_od = fiji_set_sclk_od, - .get_mclk_od = fiji_get_mclk_od, - .set_mclk_od = fiji_set_mclk_od, -}; - -int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - hwmgr->hwmgr_func = &fiji_hwmgr_funcs; - hwmgr->pptable_func = &pptable_v1_0_funcs; - pp_fiji_thermal_initialize(hwmgr); - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h deleted file mode 100644 index bf67c2a92c68..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
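A worked instance of the overdrive arithmetic in fiji_get_sclk_od()/fiji_set_sclk_od() above may help; dpm_levels[].value holds clocks in 10 kHz units (hence the /100 when printing MHz), and the numbers here are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int golden_top = 105000;	/* stock top SCLK level: 1050 MHz (hypothetical) */
	unsigned int current_top = 110250;	/* overclocked top SCLK level: 1102.5 MHz        */

	/* fiji_get_sclk_od(): percent above the golden (stock) table */
	int od = (current_top - golden_top) * 100 / golden_top;	/* -> 5 */

	/* fiji_set_sclk_od(5): rebuild the top level from the golden value */
	unsigned int new_top = golden_top * od / 100 + golden_top;	/* -> 110250 */

	printf("od = %d%%, new top sclk = %u (10 kHz units)\n", od, new_top);
	return 0;
}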
- * - */ - -#ifndef _FIJI_HWMGR_H_ -#define _FIJI_HWMGR_H_ - -#include "hwmgr.h" -#include "smu73.h" -#include "smu73_discrete.h" -#include "ppatomctrl.h" -#include "fiji_ppsmc.h" -#include "pp_endian.h" - -#define FIJI_MAX_HARDWARE_POWERLEVELS 2 -#define FIJI_AT_DFLT 30 - -#define FIJI_VOLTAGE_CONTROL_NONE 0x0 -#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2 -#define FIJI_VOLTAGE_CONTROL_MERGED 0x3 - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -struct fiji_performance_level { - uint32_t memory_clock; - uint32_t engine_clock; - uint16_t pcie_gen; - uint16_t pcie_lane; -}; - -struct fiji_uvd_clocks { - uint32_t vclk; - uint32_t dclk; -}; - -struct fiji_vce_clocks { - uint32_t evclk; - uint32_t ecclk; -}; - -struct fiji_power_state { - uint32_t magic; - struct fiji_uvd_clocks uvd_clks; - struct fiji_vce_clocks vce_clks; - uint32_t sam_clk; - uint32_t acp_clk; - uint16_t performance_level_count; - bool dc_compatible; - uint32_t sclk_threshold; - struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS]; -}; - -struct fiji_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; - -#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define FIJI_MINIMUM_ENGINE_CLOCK 2500 - -struct fiji_single_dpm_table { - uint32_t count; - struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct fiji_dpm_table { - struct fiji_single_dpm_table sclk_table; - struct fiji_single_dpm_table mclk_table; - struct fiji_single_dpm_table pcie_speed_table; - struct fiji_single_dpm_table vddc_table; - struct fiji_single_dpm_table vddci_table; - struct fiji_single_dpm_table mvdd_table; -}; - -struct fiji_clock_registers { - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t vDLL_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_SS1; - uint32_t vMPLL_SS2; -}; - -struct fiji_voltage_smio_registers { - uint32_t vS0_VID_LOWER_SMIO_CNTL; -}; - -#define FIJI_MAX_LEAKAGE_COUNT 8 -struct fiji_leakage_voltage { - uint16_t count; - uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT]; -}; - -struct fiji_vbios_boot_state { - uint16_t mvdd_bootup_value; - uint16_t vddc_bootup_value; - uint16_t vddci_bootup_value; - uint32_t sclk_bootup_value; - uint32_t mclk_bootup_value; - uint16_t pcie_gen_bootup_value; - uint16_t pcie_lane_bootup_value; -}; - -struct fiji_bacos { - uint32_t best_match; - uint32_t baco_flags; - struct fiji_performance_level performance_level; -}; - -/* Ultra Low Voltage parameter structure */ -struct fiji_ulv_parm { - bool ulv_supported; - uint32_t cg_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct fiji_performance_level ulv_power_level; -}; - -struct fiji_display_timing { - uint32_t min_clock_in_sr; - uint32_t num_existing_displays; -}; - -struct fiji_dpmlevel_enable_mask { - uint32_t uvd_dpm_enable_mask; - uint32_t vce_dpm_enable_mask; - uint32_t acp_dpm_enable_mask; - uint32_t samu_dpm_enable_mask; - uint32_t sclk_dpm_enable_mask; - uint32_t mclk_dpm_enable_mask; - uint32_t pcie_dpm_enable_mask; -}; - -struct 
fiji_pcie_perf_range { - uint16_t max; - uint16_t min; -}; - -struct fiji_hwmgr { - struct fiji_dpm_table dpm_table; - struct fiji_dpm_table golden_dpm_table; - - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; - uint32_t static_screen_threshold_unit; - uint32_t static_screen_threshold; - uint32_t voltage_control; - uint32_t vddc_vddci_delta; - - uint32_t active_auto_throttle_sources; - - struct fiji_clock_registers clock_registers; - struct fiji_voltage_smio_registers voltage_smio_registers; - - bool is_memory_gddr5; - uint16_t acpi_vddc; - bool pspp_notify_required; - uint16_t force_pcie_gen; - uint16_t acpi_pcie_gen; - uint32_t pcie_gen_cap; - uint32_t pcie_lane_cap; - uint32_t pcie_spc_cap; - struct fiji_leakage_voltage vddc_leakage; - struct fiji_leakage_voltage Vddci_leakage; - - uint32_t mvdd_control; - uint32_t vddc_mask_low; - uint32_t mvdd_mask_low; - uint16_t max_vddc_in_pptable; - uint16_t min_vddc_in_pptable; - uint16_t max_vddci_in_pptable; - uint16_t min_vddci_in_pptable; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edcwr_enable_threshold; - bool is_uvd_enabled; - struct fiji_vbios_boot_state vbios_boot_state; - - bool battery_state; - bool is_tlu_enabled; - - /* ---- SMC SRAM Address of firmware header tables ---- */ - uint32_t sram_end; - uint32_t dpm_table_start; - uint32_t soft_regs_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; - struct SMU73_Discrete_DpmTable smc_state_table; - struct SMU73_Discrete_Ulv ulv_setting; - - /* ---- Stuff originally coming from Evergreen ---- */ - uint32_t vddci_control; - struct pp_atomctrl_voltage_table vddc_voltage_table; - struct pp_atomctrl_voltage_table vddci_voltage_table; - struct pp_atomctrl_voltage_table mvdd_voltage_table; - - uint32_t mgcg_cgtt_local2; - uint32_t mgcg_cgtt_local3; - uint32_t gpio_debug; - uint32_t mc_micro_code_feature; - uint32_t highest_mclk; - uint16_t acpi_vddci; - uint8_t mvdd_high_index; - uint8_t mvdd_low_index; - bool dll_default_on; - bool performance_request_registered; - - /* ---- Low Power Features ---- */ - struct fiji_bacos bacos; - struct fiji_ulv_parm ulv; - - /* ---- CAC Stuff ---- */ - uint32_t cac_table_start; - bool cac_configuration_required; - bool driver_calculate_cac_leakage; - bool cac_enabled; - - /* ---- DPM2 Parameters ---- */ - uint32_t power_containment_features; - bool enable_dte_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool disable_uvd_power_tune_feature; - const struct fiji_pt_defaults *power_tune_defaults; - struct SMU73_Discrete_PmFuses power_tune_table; - uint32_t dte_tj_offset; - uint32_t fast_watermark_threshold; - - /* ---- Phase Shedding ---- */ - bool vddc_phase_shed_control; - - /* ---- DI/DT ---- */ - struct fiji_display_timing display_timing; - - /* ---- Thermal Temperature Setting ---- */ - struct fiji_dpmlevel_enable_mask dpm_level_enable_mask; - uint32_t need_update_smu7_dpm_table; - uint32_t sclk_dpm_key_disabled; - uint32_t mclk_dpm_key_disabled; - uint32_t pcie_dpm_key_disabled; - uint32_t min_engine_clocks; - struct fiji_pcie_perf_range pcie_gen_performance; - struct fiji_pcie_perf_range pcie_lane_performance; - struct fiji_pcie_perf_range pcie_gen_power_saving; 
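The dpm_level_enable_mask.pcie_dpm_enable_mask field declared above is what fiji_force_clock_level() intersects with the user-supplied mask before forcing a PCIe level; the while (tmp >>= 1) loop simply finds the index of the highest set bit. A small illustration with made-up masks:

#include <stdio.h>

int main(void)
{
	unsigned int user_mask = 0x6;	/* request levels 1 and 2 (made up)            */
	unsigned int enable_mask = 0x7;	/* pcie_dpm_enable_mask: levels 0..2 (made up) */
	unsigned int tmp = user_mask & enable_mask;
	unsigned int level = 0;

	while (tmp >>= 1)		/* index of the highest set bit */
		level++;

	printf("PPSMC_MSG_PCIeDPM_ForceLevel argument: %u\n", level);	/* 2 */
	return 0;
}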
- struct fiji_pcie_perf_range pcie_lane_power_saving; - bool use_pcie_performance_levels; - bool use_pcie_power_saving_levels; - uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS]; - uint32_t mclk_activity_target; - uint32_t mclk_dpm0_activity_target; - uint32_t low_sclk_interrupt_threshold; - uint32_t last_mclk_dpm_enable_mask; - bool uvd_enabled; - - /* ---- Power Gating States ---- */ - bool uvd_power_gated; - bool vce_power_gated; - bool samu_power_gated; - bool acp_power_gated; - bool pg_acp_init; - bool frtc_enabled; - bool frtc_status_changed; -}; - -/* To convert to Q8.8 format for firmware */ -#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256 - -enum Fiji_I2CLineID { - Fiji_I2CLineID_DDC1 = 0x90, - Fiji_I2CLineID_DDC2 = 0x91, - Fiji_I2CLineID_DDC3 = 0x92, - Fiji_I2CLineID_DDC4 = 0x93, - Fiji_I2CLineID_DDC5 = 0x94, - Fiji_I2CLineID_DDC6 = 0x95, - Fiji_I2CLineID_SCLSDA = 0x96, - Fiji_I2CLineID_DDCVGA = 0x97 -}; - -#define Fiji_I2C_DDC1DATA 0 -#define Fiji_I2C_DDC1CLK 1 -#define Fiji_I2C_DDC2DATA 2 -#define Fiji_I2C_DDC2CLK 3 -#define Fiji_I2C_DDC3DATA 4 -#define Fiji_I2C_DDC3CLK 5 -#define Fiji_I2C_SDA 40 -#define Fiji_I2C_SCL 41 -#define Fiji_I2C_DDC4DATA 65 -#define Fiji_I2C_DDC4CLK 66 -#define Fiji_I2C_DDC5DATA 0x48 -#define Fiji_I2C_DDC5CLK 0x49 -#define Fiji_I2C_DDC6DATA 0x4a -#define Fiji_I2C_DDC6CLK 0x4b -#define Fiji_I2C_DDCVGADATA 0x4c -#define Fiji_I2C_DDCVGACLK 0x4d - -#define FIJI_UNUSED_GPIO_PIN 0x7F - -extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); -extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); -extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); -extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display); -int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); -int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); - -#endif /* _FIJI_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c deleted file mode 100644 index f5992ea0c56f..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ /dev/null @@ -1,610 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "hwmgr.h" -#include "smumgr.h" -#include "fiji_hwmgr.h" -#include "fiji_powertune.h" -#include "fiji_smumgr.h" -#include "smu73_discrete.h" -#include "pp_debug.h" - -#define VOLTAGE_SCALE 4 -#define POWERTUNE_DEFAULT_SET_MAX 1 - -const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { - /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ - {1, 0xF, 0xFD, - /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ - 0x19, 5, 45} -}; - -void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t tmp = 0; - - if(table_info && - table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && - table_info->cac_dtp_table->usPowerTuneDataSetID) - fiji_hwmgr->power_tune_defaults = - &fiji_power_tune_data_set_array - [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; - else - fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0]; - - /* Assume disabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - fiji_hwmgr->dte_tj_offset = tmp; - - if (!tmp) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - - fiji_hwmgr->fast_watermark_threshold = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - tmp = 1; - fiji_hwmgr->enable_dte_feature = tmp ? false : true; - fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; - fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; - } - } -} - -/* PPGen has the gain setting generated in x * 100 unit - * This function is to convert the unit to x * 4096(0x1000) unit. 
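/* Worked instance (hypothetical input): a fan gain of 2.50 arrives from the
 * powerplay table as 250 in the x * 100 encoding, and scale_fan_gain_settings()
 * below turns it into 250 * 4096 / 100 = 10240, i.e. 2.5 * 0x1000, matching
 * the fixed-point layout the SMC fan-gain fields expect. */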
- * This is the unit expected by SMC firmware - */ -static uint16_t scale_fan_gain_settings(uint16_t raw_setting) -{ - uint32_t tmp; - tmp = raw_setting * 4096 / 100; - return (uint16_t)tmp; -} - -static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda) -{ - switch (line) { - case Fiji_I2CLineID_DDC1 : - *scl = Fiji_I2C_DDC1CLK; - *sda = Fiji_I2C_DDC1DATA; - break; - case Fiji_I2CLineID_DDC2 : - *scl = Fiji_I2C_DDC2CLK; - *sda = Fiji_I2C_DDC2DATA; - break; - case Fiji_I2CLineID_DDC3 : - *scl = Fiji_I2C_DDC3CLK; - *sda = Fiji_I2C_DDC3DATA; - break; - case Fiji_I2CLineID_DDC4 : - *scl = Fiji_I2C_DDC4CLK; - *sda = Fiji_I2C_DDC4DATA; - break; - case Fiji_I2CLineID_DDC5 : - *scl = Fiji_I2C_DDC5CLK; - *sda = Fiji_I2C_DDC5DATA; - break; - case Fiji_I2CLineID_DDC6 : - *scl = Fiji_I2C_DDC6CLK; - *sda = Fiji_I2C_DDC6DATA; - break; - case Fiji_I2CLineID_SCLSDA : - *scl = Fiji_I2C_SCL; - *sda = Fiji_I2C_SDA; - break; - case Fiji_I2CLineID_DDCVGA : - *scl = Fiji_I2C_DDCVGACLK; - *sda = Fiji_I2C_DDCVGADATA; - break; - default: - *scl = 0; - *sda = 0; - break; - } -} - -int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; - struct pp_advance_fan_control_parameters *fan_table= - &hwmgr->thermal_controller.advanceFanControlParameters; - uint8_t uc_scl, uc_sda; - - /* TDP number of fraction bits are changed from 8 to 7 for Fiji - * as requested by SMC team - */ - dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( - (uint16_t)(cac_dtp_table->usTDP * 128)); - dpm_table->TargetTdp = PP_HOST_TO_SMC_US( - (uint16_t)(cac_dtp_table->usTDP * 128)); - - PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, - "Target Operating Temp is out of Range!",); - - dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); - dpm_table->GpuTjHyst = 8; - - dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; - - /* The following are for new Fiji Multi-input fan/thermal control */ - dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( - cac_dtp_table->usTargetOperatingTemp * 256); - dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitHotspot * 256); - dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitLiquid1 * 256); - dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitLiquid2 * 256); - dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitVrVddc * 256); - dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitVrMvdd * 256); - dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitPlx * 256); - - dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainEdge)); - dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainHotspot)); - dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainLiquid)); - dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainVrVddc)); - dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( - 
scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); - dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainPlx)); - dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainHbm)); - - dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; - dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; - dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; - dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; - - get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); - dpm_table->Liquid_I2C_LineSCL = uc_scl; - dpm_table->Liquid_I2C_LineSDA = uc_sda; - - get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); - dpm_table->Vr_I2C_LineSCL = uc_scl; - dpm_table->Vr_I2C_LineSDA = uc_sda; - - get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); - dpm_table->Plx_I2C_LineSCL = uc_scl; - dpm_table->Plx_I2C_LineSDA = uc_sda; - - return 0; -} - -static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - - data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; - data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; - data->power_tune_table.SviLoadLineTrimVddC = 3; - data->power_tune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} - -static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) -{ - uint16_t tdc_limit; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - - /* TDC number of fraction bits are changed from 8 to 7 - * for Fiji as requested by SMC team - */ - tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); - data->power_tune_table.TDC_VDDC_PkgLimit = - CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); - data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - defaults->TDC_VDDC_ThrottleReleaseLimitPerc; - data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; - - return 0; -} - -static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - uint32_t temp; - - if (fiji_read_smc_sram_dword(hwmgr->smumgr, - fuse_table_offset + - offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), - (uint32_t *)&temp, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", - return -EINVAL); - else { - data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; - data->power_tune_table.LPMLTemperatureMin = - (uint8_t)((temp >> 16) & 0xff); - data->power_tune_table.LPMLTemperatureMax = - (uint8_t)((temp >> 8) & 0xff); - data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); - } - return 0; -} - -static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) -{ - int i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.LPMLTemperatureScaler[i] = 0; - - return 0; -} - -static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if( (hwmgr->thermal_controller.advanceFanControlParameters. 
- usFanOutputSensitivity & (1 << 15)) || - 0 == hwmgr->thermal_controller.advanceFanControlParameters. - usFanOutputSensitivity ) - hwmgr->thermal_controller.advanceFanControlParameters. - usFanOutputSensitivity = hwmgr->thermal_controller. - advanceFanControlParameters.usDefaultFanOutputSensitivity; - - data->power_tune_table.FuzzyFan_PwmSetDelta = - PP_HOST_TO_SMC_US(hwmgr->thermal_controller. - advanceFanControlParameters.usFanOutputSensitivity); - return 0; -} - -static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) -{ - int i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.GnbLPML[i] = 0; - - return 0; -} - -static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - /* int i, min, max; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd; - uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd; - - min = max = pHiVID[0]; - for (i = 0; i < 8; i++) { - if (0 != pHiVID[i]) { - if (min > pHiVID[i]) - min = pHiVID[i]; - if (max < pHiVID[i]) - max = pHiVID[i]; - } - - if (0 != pLoVID[i]) { - if (min > pLoVID[i]) - min = pLoVID[i]; - if (max < pLoVID[i]) - max = pLoVID[i]; - } - } - - PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed); - data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max; - data->power_tune_table.GnbLPMLMinVid = (uint8_t)min; -*/ - return 0; -} - -static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - - HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); - LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); - - data->power_tune_table.BapmVddCBaseLeakageHiSidd = - CONVERT_FROM_HOST_TO_SMC_US(HiSidd); - data->power_tune_table.BapmVddCBaseLeakageLoSidd = - CONVERT_FROM_HOST_TO_SMC_US(LoSidd); - - return 0; -} - -int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t pm_fuse_table_offset; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to get pm_fuse_table_offset Failed!", - return -EINVAL); - - /* DW6 */ - if (fiji_populate_svi_load_line(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate SviLoadLine Failed!", - return -EINVAL); - /* DW7 */ - if (fiji_populate_tdc_limit(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TDCLimit Failed!", return -EINVAL); - /* DW8 */ - if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TdcWaterfallCtl, " - "LPMLTemperature Min and Max Failed!", - return -EINVAL); - - /* DW9-DW12 */ - if (0 != fiji_populate_temperature_scaler(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate LPMLTemperatureScaler Failed!", - return -EINVAL); - - /* DW13-DW14 
*/ - if(fiji_populate_fuzzy_fan(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate Fuzzy Fan Control parameters Failed!", - return -EINVAL); - - /* DW15-DW18 */ - if (fiji_populate_gnb_lpml(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Failed!", - return -EINVAL); - - /* DW19 */ - if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - - /* DW20 */ - if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate BapmVddCBaseLeakage Hi and Lo " - "Sidd Failed!", return -EINVAL); - - if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, - (uint8_t *)&data->power_tune_table, - sizeof(struct SMU73_Discrete_PmFuses), data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to download PmFuseTable Failed!", - return -EINVAL); - } - return 0; -} - -int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { - int smc_result; - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableCac)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable CAC in SMC.", result = -1); - - data->cac_enabled = (0 == smc_result) ? true : false; - } - return result; -} - -int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC) && data->cac_enabled) { - int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableCac)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable CAC in SMC.", result = -1); - - data->cac_enabled = false; - } - return result; -} - -int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if(data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PkgPwrSetLimit, n); - return 0; -} - -static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) -{ - return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); -} - -int fiji_enable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int smc_result; - int result = 0; - - data->power_containment_features = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (data->enable_dte_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableDTE)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable DTE in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; - } - - if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable TDCLimit in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_TDCLimit; - } - - if 
(data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable PkgPwrTracking in SMC.", result = -1;); - if (0 == smc_result) { - struct phm_cac_tdp_table *cac_table = - table_info->cac_dtp_table; - uint32_t default_limit = - (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_PkgPwrLimit; - - if (fiji_set_power_limit(hwmgr, default_limit)) - printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); - } - } - } - return result; -} - -int fiji_disable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment) && - data->power_containment_features) { - int smc_result; - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_TDCLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable TDCLimit in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_DTE) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableDTE)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable DTE in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable PkgPwrTracking in SMC.", - result = smc_result); - } - data->power_containment_features = 0; - } - - return result; -} - -int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - int adjust_percent, target_tdp; - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - /* adjustment percentage has already been validated */ - adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? - hwmgr->platform_descriptor.TDPAdjustment : - (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; - result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); - } - - return result; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h deleted file mode 100644 index fec772421733..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
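To make the fixed-point handling in fiji_power_control_set_level() above concrete: the message payload carries watts with an 8-bit fraction (usTDP * 256), so a +10% adjustment on a hypothetical 220 W board works out as follows:

#include <stdio.h>

int main(void)
{
	int us_tdp = 220;		/* cac_table->usTDP in watts (hypothetical) */
	int adjust_percent = 10;	/* validated TDP adjustment: +10%           */

	/* 8-bit fraction for the message: watts * 256 */
	int target_tdp = ((100 + adjust_percent) * (us_tdp * 256)) / 100;

	/* 61952 == 242 * 256, i.e. 242 W passed to PPSMC_MSG_OverDriveSetTargetTdp */
	printf("target_tdp = %d\n", target_tdp);
	return 0;
}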
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef FIJI_POWERTUNE_H -#define FIJI_POWERTUNE_H - -enum fiji_pt_config_reg_type { - FIJI_CONFIGREG_MMR = 0, - FIJI_CONFIGREG_SMC_IND, - FIJI_CONFIGREG_DIDT_IND, - FIJI_CONFIGREG_CACHE, - FIJI_CONFIGREG_MAX -}; - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 - -#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 -#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 -#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 -#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 -#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d -#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 -#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d -#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 -#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d - -struct fiji_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum fiji_pt_config_reg_type type; -}; - -struct fiji_pt_defaults -{ - uint8_t SviLoadLineEn; - uint8_t SviLoadLineVddC; - uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; - uint8_t TDC_MAWt; - uint8_t TdcWaterfallCtl; - uint8_t DTEAmbientTempBase; -}; - -void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); -int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); -int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); -int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); -int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr); -int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); -int fiji_disable_power_containment(struct pp_hwmgr *hwmgr); -int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); -int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); - -#endif /* FIJI_POWERTUNE_H */ - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c deleted file mode 100644 index 7f431e762262..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c +++ /dev/null @@ -1,687 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include -#include "fiji_thermal.h" -#include "fiji_hwmgr.h" -#include "fiji_smumgr.h" -#include "fiji_ppsmc.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" - -int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, - struct phm_fan_speed_info *fan_speed_info) -{ - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - fan_speed_info->supports_percent_read = true; - fan_speed_info->supports_percent_write = true; - fan_speed_info->min_percent = 0; - fan_speed_info->max_percent = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - fan_speed_info->supports_rpm_read = true; - fan_speed_info->supports_rpm_write = true; - fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; - fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; - } else { - fan_speed_info->min_rpm = 0; - fan_speed_info->max_rpm = 0; - } - - return 0; -} - -int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, - uint32_t *speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_STATUS, FDO_PWM_DUTY); - - if (duty100 == 0) - return -EINVAL; - - - tmp64 = (uint64_t)duty * 100; - do_div(tmp64, duty100); - *speed = (uint32_t)tmp64; - - if (*speed > 100) - *speed = 100; - - return 0; -} - -int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - uint32_t tach_period; - uint32_t crystal_clock_freq; - - if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0)) - return 0; - - tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD); - - if (tach_period == 0) - return -EINVAL; - - crystal_clock_freq = tonga_get_xclk(hwmgr); - - *speed = 60 * crystal_clock_freq * 10000/ tach_period; - - return 0; -} - -/** -* Set Fan Speed Control to static mode, so that the user can decide what speed to use. -* @param hwmgr the address of the powerplay hardware manager. 
-* mode the fan control mode, 0 default, 1 by percent, 5, by RPM -* @exception Should always succeed. -*/ -int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - - if (hwmgr->fan_ctrl_is_in_default_mode) { - hwmgr->fan_ctrl_default_mode = - PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); - hwmgr->tmin = - PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN); - hwmgr->fan_ctrl_is_in_default_mode = false; - } - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE, mode); - - return 0; -} - -/** -* Reset Fan Speed Control to default mode. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Should always succeed. -*/ -int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->fan_ctrl_is_in_default_mode) { - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN, hwmgr->tmin); - hwmgr->fan_ctrl_is_in_default_mode = true; - } - - return 0; -} - -static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - int result; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport)) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM)) - hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM); - else - hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr, - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM); - - } else { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - } - - if (!result && hwmgr->thermal_controller. - advanceFanControlParameters.ucTargetTemperature) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanTemperatureTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ucTargetTemperature); - - return result; -} - - -int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); -} - -/** -* Set Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (0% - 100%) to be set. -* @exception Fails is the 100% setting appears to be 0. 
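A concrete instance of the duty-cycle scaling shared by fiji_fan_ctrl_get_fan_speed_percent() above and fiji_fan_ctrl_set_fan_speed_percent() that follows; the FMAX_DUTY100 full-scale value is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;			/* FMAX_DUTY100 full-scale duty (hypothetical) */

	/* setting 50%: value programmed into FDO_STATIC_DUTY */
	uint32_t duty = 50 * duty100 / 100;	/* -> 127 */

	/* reading it back: integer truncation costs a percent */
	uint32_t percent = duty * 100 / duty100;	/* -> 49 */

	printf("duty = %u, read back = %u%%\n", duty, percent);
	return 0;
}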
-*/ -int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, - uint32_t speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (speed > 100) - speed = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - fiji_fan_ctrl_stop_smc_fan_control(hwmgr); - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - - return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reset Fan Speed to default. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Always succeeds. -*/ -int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) -{ - int result; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - if (!result) - result = fiji_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = fiji_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (min - max) to be set. -* @exception Fails is the speed not lie between min and max. -*/ -int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - uint32_t tach_period; - uint32_t crystal_clock_freq; - - if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0) || - (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || - (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; - - crystal_clock_freq = tonga_get_xclk(hwmgr); - - tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD, tach_period); - - return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reads the remote temperature from the SIslands thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) -{ - int temp; - - temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_STATUS, CTF_TEMP); - - /* Bit 9 means the reading is lower than the lowest usable value. */ - if (temp & 0x200) - temp = FIJI_THERMAL_MAXIMUM_TEMP_READING; - else - temp = temp & 0x1ff; - - temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return temp; -} - -/** -* Set the requested temperature range for high and low alert signals -* -* @param hwmgr The address of the hardware manager. -* @param range Temperature range to be programmed for high and low alert signals -* @exception PP_Result_BadInput if the input data is not valid. 
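The set-speed paths just run the same conversions in the other direction: a requested percentage is scaled up to a static PWM duty, and a requested RPM is turned back into a tachometer period. A small sketch with assumed sample values:

#include <stdio.h>

/* Hypothetical inputs, for illustration only. */
#define SAMPLE_FMAX_DUTY100  255u    /* CG_FDO_CTRL1.FMAX_DUTY100 */
#define SAMPLE_XCLK_10KHZ    2700u   /* reference clock, assumed 10 kHz units */

int main(void)
{
	unsigned int percent = 55;   /* requested fan speed in percent */
	unsigned int rpm = 2700;     /* requested fan speed in RPM */

	/* Percent -> static duty written to CG_FDO_CTRL0.FDO_STATIC_DUTY. */
	unsigned int duty = percent * SAMPLE_FMAX_DUTY100 / 100;

	/* RPM -> tachometer period written to CG_TACH_STATUS.TACH_PERIOD,
	 * with the divisor of 8 used by the deleted set-RPM helper. */
	unsigned int tach_period = 60 * SAMPLE_XCLK_10KHZ * 10000 / (8 * rpm);

	printf("%u%% -> duty %u\n", percent, duty);
	printf("%u RPM -> tach period %u\n", rpm, tach_period);
	return 0;
}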
-*/ -static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - uint32_t low_temp, uint32_t high_temp) -{ - uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - if (low < low_temp) - low = low_temp; - if (high > high_temp) - high = high_temp; - - if (low > high) - return -EINVAL; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, DIG_THERM_INTH, - (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, DIG_THERM_INTL, - (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_CTRL, DIG_THERM_DPM, - (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - - return 0; -} - -/** -* Programs thermal controller one-time setting registers -* -* @param hwmgr The address of the hardware manager. -*/ -static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_CTRL, EDGE_PER_REV, - hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution - 1); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); - - return 0; -} - -/** -* Enable thermal alerts on the RV770 thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to enable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); -} - -/** -* Disable thermal alerts on the RV770 thermal controller. -* @param hwmgr The address of the hardware manager. -*/ -static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK); - alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to disable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); -} - -/** -* Uninitialize the thermal controller. -* Currently just disables alerts. -* @param hwmgr The address of the hardware manager. -*/ -int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) -{ - int result = fiji_thermal_disable_alert(hwmgr); - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - fiji_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (data->fan_table_start == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. - usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(hwmgr-> - thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = tonga_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> - thermal_controller.advanceFanControlParameters.ulCycleDelay * - reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( - hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_CTRL, TEMP_SEL); - - res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, - (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), - data->sram_end); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanMinPwm, - hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit); - - if (!res && hwmgr->thermal_controller. 
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanSclkTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); - - if (res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - - return 0; -} - -/** -* Start the fan control on the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ -/* If the fantable setup has failed we could have disabled - * PHM_PlatformCaps_MicrocodeFanControl even after - * this function was included in the table. - * Make sure that we still think controlling the fan is OK. -*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - fiji_fan_ctrl_start_smc_fan_control(hwmgr); - fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - } - - return 0; -} - -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; - - if (range == NULL) - return -EINVAL; - - return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return fiji_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return fiji_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
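The fan-table setup above is mostly fixed-point bookkeeping before the table is copied to the SMC. The sketch below walks the slope and minimum-duty arithmetic with assumed sample numbers; the 1/100-unit interpretation of the temperature and PWM fields is an inference from the add-50/divide-by-100 rounding, not a documented fact.

#include <stdio.h>

int main(void)
{
	/* Assumed sample inputs: duty100 from CG_FDO_CTRL1, temperature and
	 * PWM points from the fan table, taken here to be in 1/100 units. */
	unsigned int duty100 = 255;
	unsigned int t_min = 4000, t_med = 6500;      /* 40 C and 65 C */
	unsigned int pwm_min = 3000, pwm_med = 6000;  /* 30% and 60% */

	unsigned int t_diff1 = t_med - t_min;
	unsigned int pwm_diff1 = pwm_med - pwm_min;

	/* Slope of the PWM-vs-temperature ramp, in the rounded fixed-point
	 * form the deleted table-setup code hands to the SMC. */
	unsigned int slope1 = (50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100;

	/* Minimum duty: pwm_min (1/100 percent) scaled against duty100. */
	unsigned int fdo_min = pwm_min * duty100 / 10000;

	/* Temperatures are rounded from 1/100 units to whole degrees. */
	printf("TempMin %u, slope1 %u, fdo_min %u\n",
	       (50 + t_min) / 100, slope1, fdo_min);
	return 0;
}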
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return fiji_thermal_disable_alert(hwmgr); -} - -static const struct phm_master_table_item -fiji_thermal_start_thermal_controller_master_list[] = { - {NULL, tf_fiji_thermal_initialize}, - {NULL, tf_fiji_thermal_set_temperature_range}, - {NULL, tf_fiji_thermal_enable_alert}, -/* We should restrict performance levels to low before we halt the SMC. - * On the other hand we are still in boot state when we do this - * so it would be pointless. - * If this assumption changes we have to revisit this table. - */ - {NULL, tf_fiji_thermal_setup_fan_table}, - {NULL, tf_fiji_thermal_start_smc_fan_control}, - {NULL, NULL} -}; - -static const struct phm_master_table_header -fiji_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - fiji_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item -fiji_thermal_set_temperature_range_master_list[] = { - {NULL, tf_fiji_thermal_disable_alert}, - {NULL, tf_fiji_thermal_set_temperature_range}, - {NULL, tf_fiji_thermal_enable_alert}, - {NULL, NULL} -}; - -static const struct phm_master_table_header -fiji_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - fiji_thermal_set_temperature_range_master_list -}; - -int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - fiji_fan_ctrl_set_default_mode(hwmgr); - return 0; -} - -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &fiji_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &fiji_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h deleted file mode 100644 index 8621493b8574..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef FIJI_THERMAL_H -#define FIJI_THERMAL_H - -#include "hwmgr.h" - -#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1 -#define FIJI_THERMAL_LOW_ALERT_MASK 0x2 - -#define FIJI_THERMAL_MINIMUM_TEMP_READING -256 -#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255 - -#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0 -#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); - -extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); -extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c deleted file mode 100644 index 47949f5cd073..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Author: Huang Rui - * - */ - -#include "hwmgr.h" -#include "iceland_clockpowergating.h" -#include "ppsmc.h" -#include "iceland_hwmgr.h" - -int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) -{ - /* iceland does not have MM hardware block */ - return 0; -} - -static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr) -{ - /* iceland does not have MM hardware block */ - return 0; -} - -static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr) -{ - /* iceland does not have MM hardware block */ - return 0; -} - -static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr) -{ - /* iceland does not have MM hardware block */ - return 0; -} - -int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum - PHM_AsicBlock block, enum PHM_ClockGateSetting gating) -{ - int ret = 0; - - switch (block) { - case PHM_AsicBlock_UVD_MVC: - case PHM_AsicBlock_UVD: - case PHM_AsicBlock_UVD_HD: - case PHM_AsicBlock_UVD_SD: - if (gating == PHM_ClockGateSetting_StaticOff) - ret = iceland_phm_powerdown_uvd(hwmgr); - else - ret = iceland_phm_powerup_uvd(hwmgr); - break; - case PHM_AsicBlock_GFX: - default: - break; - } - - return ret; -} - -int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - - iceland_phm_powerup_uvd(hwmgr); - iceland_phm_powerup_vce(hwmgr); - - return 0; -} - -int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) -{ - if (bgate) { - iceland_update_uvd_dpm(hwmgr, true); - iceland_phm_powerdown_uvd(hwmgr); - } else { - iceland_phm_powerup_uvd(hwmgr); - iceland_update_uvd_dpm(hwmgr, false); - } - - return 0; -} - -int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) -{ - if (bgate) - return iceland_phm_powerdown_vce(hwmgr); - else - return iceland_phm_powerup_vce(hwmgr); - - return 0; -} - -int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, - const uint32_t *msg_id) -{ - /* iceland does not have MM hardware block */ - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h deleted file mode 100644 index ff5ef00c7c68..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Author: Huang Rui - * - */ - -#ifndef _ICELAND_CLOCK_POWER_GATING_H_ -#define _ICELAND_CLOCK_POWER_GATING_H_ - -#include "iceland_hwmgr.h" -#include "pp_asicblocks.h" - -extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); -extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); -extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); -#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h deleted file mode 100644 index a7b4bc6caea2..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef ICELAND_DYN_DEFAULTS_H -#define ICELAND_DYN_DEFAULTS_H - -enum ICELANDdpm_TrendDetection -{ - ICELANDdpm_TrendDetection_AUTO, - ICELANDdpm_TrendDetection_UP, - ICELANDdpm_TrendDetection_DOWN -}; -typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection; - - -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 -#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 - - -#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200 - -#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0 - -#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8 - -#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 - -#define PPICELAND_REFERENCEDIVIDER_DFLT 4 - -#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035 -#define PPICELAND_CGULVCONTROL_DFLT 0x00007450 -#define PPICELAND_TARGETACTIVITY_DFLT 30 -#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10 - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c deleted file mode 100644 index 50aa23f15540..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ /dev/null @@ -1,5666 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Author: Huang Rui - * - */ -#include -#include -#include -#include "linux/delay.h" -#include "pp_acpi.h" -#include "hwmgr.h" -#include -#include "iceland_hwmgr.h" -#include "pptable.h" -#include "processpptables.h" -#include "pp_debug.h" -#include "ppsmc.h" -#include "cgs_common.h" -#include "pppcielanes.h" -#include "iceland_dyn_defaults.h" -#include "smumgr.h" -#include "iceland_smumgr.h" -#include "iceland_clockpowergating.h" -#include "iceland_thermal.h" -#include "iceland_powertune.h" - -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" - -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" - -#include "smu/smu_7_1_1_d.h" -#include "smu/smu_7_1_1_sh_mask.h" - -#include "cgs_linux.h" -#include "eventmgr.h" -#include "amd_pcie_helpers.h" - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define MC_CG_SEQ_DRAMCONF_S0 0x05 -#define MC_CG_SEQ_DRAMCONF_S1 0x06 -#define MC_CG_SEQ_YCLK_SUSPEND 0x04 -#define MC_CG_SEQ_YCLK_RESUME 0x0a - -#define PCIE_BUS_CLK 10000 -#define TCLK (PCIE_BUS_CLK / 10) - -#define SMC_RAM_END 0x40000 -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic); - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - -/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ - DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ - DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ -}; - -static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - data->clock_registers.vCG_SPLL_FUNC_CNTL = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); - data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); - data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); - data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); - data->clock_registers.vDLL_CNTL = - cgs_read_register(hwmgr->device, mmDLL_CNTL); - data->clock_registers.vMCLK_PWRMGT_CNTL = - cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); - data->clock_registers.vMPLL_AD_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); - data->clock_registers.vMPLL_DQ_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); - data->clock_registers.vMPLL_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); - data->clock_registers.vMPLL_FUNC_CNTL_1 = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); - data->clock_registers.vMPLL_FUNC_CNTL_2 = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); - data->clock_registers.vMPLL_SS1 = - cgs_read_register(hwmgr->device, mmMPLL_SS1); - data->clock_registers.vMPLL_SS2 = - cgs_read_register(hwmgr->device, mmMPLL_SS2); - - return 0; -} - -/** - * Find out if memory is GDDR5. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int iceland_get_memory_type(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); - - data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); - - return 0; -} - -int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - /* iceland does not have MM hardware blocks */ - return 0; -} - -/** - * Enables Dynamic Power Management by SMC - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); - - return 0; -} - -/** - * Find the MC microcode version and store it in the HwMgr struct - * - * @param hwmgr the address of the powerplay hardware manager. 
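The memory-type probe above is a single bit-field test on MC_SEQ_MISC0. A standalone illustration, with a hypothetical register value:

#include <stdbool.h>
#include <stdio.h>

#define MC_SEQ_MISC0_GDDR5_SHIFT  28
#define MC_SEQ_MISC0_GDDR5_MASK   0xf0000000u
#define MC_SEQ_MISC0_GDDR5_VALUE  5u

int main(void)
{
	/* Hypothetical MC_SEQ_MISC0 read-back, for illustration only. */
	unsigned int misc0 = 0x50001234u;

	bool is_gddr5 = ((misc0 & MC_SEQ_MISC0_GDDR5_MASK) >>
			MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;

	printf("MC_SEQ_MISC0=0x%08x -> %s\n", misc0,
	       is_gddr5 ? "GDDR5" : "not GDDR5");
	return 0;
}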
- * @return always 0 - */ -int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr) -{ - cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - - hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); - - return 0; -} - -static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - - -static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = iceland_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = iceland_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = iceland_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = iceland_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = iceland_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control; -} - -/* - * -------------- Voltage Tables ---------------------- - * If the voltage table would be bigger than what will fit into the - * state table on the SMC keep only the higher entries. - */ - -static void iceland_trim_voltage_table_to_fit_state_table( - struct pp_hwmgr *hwmgr, - uint32_t max_voltage_steps, - pp_atomctrl_voltage_table *voltage_table) -{ - unsigned int i, diff; - - if (voltage_table->count <= max_voltage_steps) { - return; - } - - diff = voltage_table->count - max_voltage_steps; - - for (i = 0; i < max_voltage_steps; i++) { - voltage_table->entries[i] = voltage_table->entries[i + diff]; - } - - voltage_table->count = max_voltage_steps; - - return; -} - -/** - * Enable voltage control - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr, - struct phm_clock_voltage_dependency_table *voltage_dependency_table, - pp_atomctrl_voltage_table *voltage_table) -{ - uint32_t i; - - PP_ASSERT_WITH_CODE((NULL != voltage_table), - "Voltage Dependency Table empty.", return -EINVAL;); - - voltage_table->mask_low = 0; - voltage_table->phase_delay = 0; - voltage_table->count = voltage_dependency_table->count; - - for (i = 0; i < voltage_dependency_table->count; i++) { - voltage_table->entries[i].value = - voltage_dependency_table->entries[i].v; - voltage_table->entries[i].smio_low = 0; - } - - return 0; -} - -/** - * Create Voltage Tables. - * - * @param hwmgr the address of the powerplay hardware manager. 
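When a voltage table has more entries than the SMC state table can hold, the deleted trim helper keeps only the highest entries by shifting the tail down. A self-contained sketch of that behaviour (the struct and values are stand-ins, not the driver's types):

#include <stdio.h>

/* Minimal stand-in for the voltage table, for illustration only. */
struct sample_voltage_table {
	unsigned int count;
	unsigned int entries[8];
};

/* Keep only the highest 'max_steps' entries, as the deleted trim helper
 * does when the table is larger than what fits in the SMC state table. */
static void trim_to_fit(struct sample_voltage_table *t, unsigned int max_steps)
{
	unsigned int i, diff;

	if (t->count <= max_steps)
		return;

	diff = t->count - max_steps;
	for (i = 0; i < max_steps; i++)
		t->entries[i] = t->entries[i + diff];
	t->count = max_steps;
}

int main(void)
{
	struct sample_voltage_table t = { 6, { 800, 850, 900, 950, 1000, 1050 } };
	unsigned int i;

	trim_to_fit(&t, 4);          /* drops the two lowest voltages */
	for (i = 0; i < t.count; i++)
		printf("%u ", t.entries[i]);
	printf("\n");                /* prints: 900 950 1000 1050 */
	return 0;
}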
- * @return always 0 - */ -int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - int result; - - /* GPIO voltage */ - if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, - &data->vddc_voltage_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDC table.", return result;); - } else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - /* SVI2 VDDC voltage */ - result = iceland_get_svi2_voltage_table(hwmgr, - hwmgr->dyn_state.vddc_dependency_on_mclk, - &data->vddc_voltage_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - iceland_trim_voltage_table_to_fit_state_table(hwmgr, - SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); - ); - - /* GPIO */ - if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", return result;); - } - - /* SVI2 VDDCI voltage */ - if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - result = iceland_get_svi2_voltage_table(hwmgr, - hwmgr->dyn_state.vddci_dependency_on_mclk, - &data->vddci_voltage_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;); - } - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - iceland_trim_voltage_table_to_fit_state_table(hwmgr, - SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); - ); - - - /* GPIO */ - if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve table.", return result;); - } - - /* SVI2 voltage control */ - if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - result = iceland_get_svi2_voltage_table(hwmgr, - hwmgr->dyn_state.mvdd_dependency_on_mclk, - &data->mvdd_voltage_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 MVDD table from dependancy table.", return result;); - } - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD. 
Trimming to fit state table.", - iceland_trim_voltage_table_to_fit_state_table(hwmgr, - SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); - ); - - return 0; -} - -/*---------------------------MC----------------------------*/ - -uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr) -{ - return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); -} - -bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) -{ - bool result = true; - - switch (inReg) { - case mmMC_SEQ_RAS_TIMING: - *outReg = mmMC_SEQ_RAS_TIMING_LP; - break; - - case mmMC_SEQ_DLL_STBY: - *outReg = mmMC_SEQ_DLL_STBY_LP; - break; - - case mmMC_SEQ_G5PDX_CMD0: - *outReg = mmMC_SEQ_G5PDX_CMD0_LP; - break; - - case mmMC_SEQ_G5PDX_CMD1: - *outReg = mmMC_SEQ_G5PDX_CMD1_LP; - break; - - case mmMC_SEQ_G5PDX_CTRL: - *outReg = mmMC_SEQ_G5PDX_CTRL_LP; - break; - - case mmMC_SEQ_CAS_TIMING: - *outReg = mmMC_SEQ_CAS_TIMING_LP; - break; - - case mmMC_SEQ_MISC_TIMING: - *outReg = mmMC_SEQ_MISC_TIMING_LP; - break; - - case mmMC_SEQ_MISC_TIMING2: - *outReg = mmMC_SEQ_MISC_TIMING2_LP; - break; - - case mmMC_SEQ_PMG_DVS_CMD: - *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; - break; - - case mmMC_SEQ_PMG_DVS_CTL: - *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; - break; - - case mmMC_SEQ_RD_CTL_D0: - *outReg = mmMC_SEQ_RD_CTL_D0_LP; - break; - - case mmMC_SEQ_RD_CTL_D1: - *outReg = mmMC_SEQ_RD_CTL_D1_LP; - break; - - case mmMC_SEQ_WR_CTL_D0: - *outReg = mmMC_SEQ_WR_CTL_D0_LP; - break; - - case mmMC_SEQ_WR_CTL_D1: - *outReg = mmMC_SEQ_WR_CTL_D1_LP; - break; - - case mmMC_PMG_CMD_EMRS: - *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; - break; - - case mmMC_PMG_CMD_MRS: - *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; - break; - - case mmMC_PMG_CMD_MRS1: - *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; - break; - - case mmMC_SEQ_PMG_TIMING: - *outReg = mmMC_SEQ_PMG_TIMING_LP; - break; - - case mmMC_PMG_CMD_MRS2: - *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; - break; - - case mmMC_SEQ_WR_CTL_2: - *outReg = mmMC_SEQ_WR_CTL_2_LP; - break; - - default: - result = false; - break; - } - - return result; -} - -int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table) -{ - uint32_t i; - uint16_t address; - - for (i = 0; i < table->last; i++) { - table->mc_reg_address[i].s0 = - iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) - ? address : table->mc_reg_address[i].s1; - } - return 0; -} - -int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table) -{ - uint8_t i, j; - - PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), - "Invalid VramInfo table.", return -1); - - for (i = 0; i < table->last; i++) { - ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; - } - ni_table->last = table->last; - - for (i = 0; i < table->num_entries; i++) { - ni_table->mc_reg_table_entry[i].mclk_max = - table->mc_reg_table_entry[i].mclk_max; - for (j = 0; j < table->last; j++) { - ni_table->mc_reg_table_entry[i].mc_data[j] = - table->mc_reg_table_entry[i].mc_data[j]; - } - } - - ni_table->num_entries = table->num_entries; - - return 0; -} - -/** - * VBIOS omits some information to reduce size, we need to recover them here. - * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. - * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] - * 2. 
when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. - * 3. need to set these data for each clock range - * - * @param hwmgr the address of the powerplay hardware manager. - * @param table the address of MCRegTable - * @return always 0 - */ -static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table) -{ - uint8_t i, j, k; - uint32_t temp_reg; - const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - for (i = 0, j = table->last; i < table->last; i++) { - PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - switch (table->mc_reg_address[i].s1) { - /* - * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write - * to mmMC_PMG_CMD_EMRS/_LP[15:0]. Bit[15:0] MRS, need - * to be update mmMC_PMG_CMD_MRS/_LP[15:0] - */ - case mmMC_SEQ_MISC1: - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - ((temp_reg & 0xffff0000)) | - ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); - } - j++; - PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - - if (!data->is_memory_GDDR5) { - table->mc_reg_table_entry[k].mc_data[j] |= 0x100; - } - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - - if (!data->is_memory_GDDR5) { - table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; - table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - } - - break; - - case mmMC_SEQ_RESERVE_M: - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - break; - - default: - break; - } - - } - - table->last = j; - - return 0; -} - - -static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table) -{ - uint8_t i, j; - for (i = 0; i < table->last; i++) { - for (j = 1; j < table->num_entries; j++) { - if (table->mc_reg_table_entry[j-1].mc_data[i] != - table->mc_reg_table_entry[j].mc_data[i]) { - table->validflag |= (1<backend); - pp_atomctrl_mc_reg_table *table; - phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table; - uint8_t module_index = iceland_get_memory_module_index(hwmgr); - - table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - /* Program additional LP 
registers that are no longer programmed by VBIOS */ - cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); - - memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); - - result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); - - if (0 == result) - result = iceland_copy_vbios_smc_reg_table(table, ni_table); - - if (0 == result) { - iceland_set_s0_mc_reg_index(ni_table); - result = iceland_set_mc_special_registers(hwmgr, ni_table); - } - - if (0 == result) - iceland_set_valid_flag(ni_table); - - kfree(table); - return result; -} - -/** - * Programs static screed detection parameters - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @return always 0 - */ -int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** - * Setup display gap for glitch free memory clock switching. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int iceland_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - - display_gap = PHM_SET_FIELD(display_gap, - CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE); - - display_gap = PHM_SET_FIELD(display_gap, - CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - return 0; -} - -/** - * Programs activity state transition voting clients - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int iceland_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - -static int iceland_upload_firmware(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -/** - * Get the location of various tables inside the FW image. - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @return always 0 - */ -static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - uint32_t tmp; - int result; - bool error = 0; - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) { - data->dpm_table_start = tmp; - } - - error |= (0 != result); - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (0 == result) { - data->soft_regs_start = tmp; - } - - error |= (0 != result); - - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (0 == result) { - data->mc_reg_table_start = tmp; - } - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (0 == result) { - data->fan_table_start = tmp; - } - - error |= (0 != result); - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (0 == result) { - data->arb_table_start = tmp; - } - - error |= (0 != result); - - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, Version), - &tmp, data->sram_end); - - if (0 == result) { - hwmgr->microcode_version_info.SMC = tmp; - } - - error |= (0 != result); - - result = smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, UlvSettings), - &tmp, data->sram_end); - - if (0 == result) { - data->ulv_settings_start = tmp; - } - - error |= (0 != result); - - return error ? 1 : 0; -} - -/* -* Copy one arb setting to another and then switch the active set. -* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants. 
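The firmware-header parsing above locates each sub-table by adding the C offset of the corresponding header field to the header's SRAM base and reading a dword there. Below is a sketch of that addressing pattern; the struct layout and base address are hypothetical, not the real SMU71 definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical firmware header layout; the real SMU71_Firmware_Header is
 * defined by the SMU firmware interface headers. */
struct sample_fw_header {
	uint32_t Version;
	uint32_t DpmTable;
	uint32_t SoftRegisters;
	uint32_t FanTable;
};

#define SAMPLE_FW_HEADER_LOCATION 0x20000u   /* hypothetical SRAM base */

int main(void)
{
	/* Each table pointer lives at header base + field offset; the deleted
	 * code reads a dword from that SRAM address for every table it needs. */
	unsigned int dpm_table_addr = SAMPLE_FW_HEADER_LOCATION +
			(unsigned int)offsetof(struct sample_fw_header, DpmTable);
	unsigned int fan_table_addr = SAMPLE_FW_HEADER_LOCATION +
			(unsigned int)offsetof(struct sample_fw_header, FanTable);

	printf("DpmTable pointer at 0x%x, FanTable pointer at 0x%x\n",
	       dpm_table_addr, fan_table_addr);
	return 0;
}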
-*/ -int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arbFreqSrc, uint32_t arbFreqDest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arbFreqSrc) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - - default: - return -1; - } - - switch (arbFreqDest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - - default: - return -1; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest); - - return 0; -} - -/** - * Initial switch from ARB F0->F1 - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - * This function is to be called from the SetPowerState table. - */ -int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr) -{ - return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -/* ---------------------------------------- ULV related functions ----------------------------------------------------*/ - - -static int iceland_reset_single_dpm_table( - struct pp_hwmgr *hwmgr, - struct iceland_single_dpm_table *dpm_table, - uint32_t count) -{ - uint32_t i; - if (!(count <= MAX_REGULAR_DPM_NUMBER)) - printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \ - table entries to exceed max number! 
\n"); - - dpm_table->count = count; - for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { - dpm_table->dpm_levels[i].enabled = 0; - } - - return 0; -} - -static void iceland_setup_pcie_table_entry( - struct iceland_single_dpm_table *dpm_table, - uint32_t index, uint32_t pcie_gen, - uint32_t pcie_lanes) -{ - dpm_table->dpm_levels[index].value = pcie_gen; - dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = 1; -} - -/* - * Set up the PCIe DPM table as follows: - * - * A = Performance State, Max, Gen Speed - * C = Performance State, Min, Gen Speed - * 1 = Performance State, Max, Lane # - * 3 = Performance State, Min, Lane # - * - * B = Power Saving State, Max, Gen Speed - * D = Power Saving State, Min, Gen Speed - * 2 = Power Saving State, Max, Lane # - * 4 = Power Saving State, Min, Lane # - * - * - * DPM Index Gen Speed Lane # - * 5 A 1 - * 4 B 2 - * 3 C 1 - * 2 D 2 - * 1 C 3 - * 0 D 4 - * - */ -static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || - data->use_pcie_power_saving_levels), - "No pcie performance levels!", return -EINVAL); - - if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK); - - /* Hardcode Pcie Table */ - iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - data->dpm_table.pcie_speed_table.count = 6; - - return 0; - -} - - -/* - * This function is to initalize all DPM state tables for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these state tables to the allowed range based - * on the power policy or external client requests, such as UVD request, etc. 
- */ -static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint32_t i; - - struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table = - hwmgr->dyn_state.vddc_dependency_on_mclk; - struct phm_cac_leakage_table *std_voltage_table = - hwmgr->dyn_state.cac_leakage_table; - - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, - "SCLK dependency table has to have is missing. This table is mandatory", return -1); - - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, - "VMCLK dependency table has to have is missing. This table is mandatory", return -1); - - /* clear the state table to reset everything to default */ - memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); - iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS); - iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY); - iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC); - iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI); - iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD); - - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", return -1); - /* Initialize Sclk DPM table based on allow Sclk values*/ - data->dpm_table.sclk_table.count = 0; - - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != - allowed_vdd_sclk_table->entries[i].clk) { - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = - allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ - data->dpm_table.sclk_table.count++; - } - } - - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", return -1); - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != - allowed_vdd_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = - allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ - data->dpm_table.mclk_table.count++; - } - } - - /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. 
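Editorial note: the SCLK and MCLK loops below implement a simple "copy while collapsing consecutive duplicate clocks" pass over the sorted dependency table. A minimal standalone sketch of that pass; the structures are trimmed to the fields the loop actually touches.

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS 8

struct dep_entry { uint32_t clk; };
struct dpm_level { uint32_t value; int enabled; };

/* copy clocks from a dependency table into a DPM level list, dropping an
 * entry whenever its clock repeats the previously accepted one (the
 * dependency table is assumed to be sorted, as the real tables are) */
static unsigned build_dpm_levels(const struct dep_entry *dep, unsigned dep_count,
				 struct dpm_level *lvl)
{
	unsigned count = 0, i;

	for (i = 0; i < dep_count && count < MAX_LEVELS; i++) {
		if (count == 0 || lvl[count - 1].value != dep[i].clk) {
			lvl[count].value = dep[i].clk;
			lvl[count].enabled = 1;
			count++;
		}
	}
	return count;
}

int main(void)
{
	/* several voltages can share one clock; only unique clocks become levels */
	struct dep_entry dep[] = { {30000}, {30000}, {60000}, {90000}, {90000} };
	struct dpm_level lvl[MAX_LEVELS];
	unsigned n = build_dpm_levels(dep, 5, lvl);

	printf("levels built: %u (expect 3)\n", n);
	return 0;
}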
*/ - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; - /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; - } - - data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; - allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; - - if (NULL != allowed_vdd_mclk_table) { - /* Initialize Vddci DPM table based on allow Mclk values */ - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; - } - - allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; - - if (NULL != allowed_vdd_mclk_table) { - /* - * Initialize MVDD DPM table based on allow Mclk - * values - */ - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; - } - - /* setup PCIE gen speed levels*/ - iceland_setup_default_pcie_tables(hwmgr); - - /* save a copy of the default DPM table*/ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table)); - - return 0; -} - -/** - * @brief PhwIceland_GetVoltageOrder - * Returns index of requested voltage record in lookup(table) - * @param hwmgr - pointer to hardware manager - * @param lookutab - lookup list to search in - * @param voltage - voltage to look for - * @return 0 on success - */ -uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, - uint16_t voltage) -{ - uint8_t count = (uint8_t) (look_up_table->count); - uint8_t i; - - PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); - PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); - - for (i = 0; i < count; i++) { - /* find first voltage equal or bigger than requested */ - if (look_up_table->entries[i].us_vdd >= voltage) - return i; - } - - /* voltage is bigger than max voltage in the table */ - return i-1; -} - - -static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, - pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, - uint16_t *lo) -{ - uint16_t v_index; - bool vol_found = false; - *hi = tab->value * VOLTAGE_SCALE; - *lo = tab->value * VOLTAGE_SCALE; - - /* SCLK/VDDC Dependency Table has to exist. */ - PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, - "The SCLK/VDDC Dependency Table does not exist.\n", - return -EINVAL); - - if (NULL == hwmgr->dyn_state.cac_leakage_table) { - pr_warning("CAC Leakage Table does not exist, using vddc.\n"); - return 0; - } - - /* - * Since voltage in the sclk/vddc dependency table is not - * necessarily in ascending order because of ELB voltage - * patching, loop through entire list to find exact voltage. 
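Editorial note: iceland_get_voltage_index() above, and the second pass of the std-voltage lookup below, follow the same pattern: scan a sorted table for the first entry at or above the requested value and fall back to the last entry when the request exceeds the table. A minimal sketch of that pattern with made-up millivolt values.

#include <stdint.h>
#include <stdio.h>

/* return the index of the first entry >= target; if every entry is smaller,
 * return the last valid index (count - 1).  count must be non-zero. */
static unsigned find_first_at_or_above(const uint16_t *table, unsigned count,
				       uint16_t target)
{
	unsigned i;

	for (i = 0; i < count; i++)
		if (table[i] >= target)
			return i;
	return count - 1;	/* target above the table: clamp to the top entry */
}

int main(void)
{
	uint16_t vddc_mv[] = { 800, 850, 900, 1000, 1100 };

	printf("%u\n", find_first_at_or_above(vddc_mv, 5, 875));	/* -> 2 */
	printf("%u\n", find_first_at_or_above(vddc_mv, 5, 1200));	/* -> 4 */
	return 0;
}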
- */ - for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { - if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { - vol_found = true; - if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { - *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; - *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); - } else { - pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); - *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; - *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); - } - break; - } - } - - /* - * If voltage is not found in the first pass, loop again to - * find the best match, equal or higher value. - */ - if (!vol_found) { - for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { - if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { - vol_found = true; - if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { - *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; - *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; - } else { - pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); - *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; - *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); - } - break; - } - } - - if (!vol_found) - pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); - } - - return 0; -} - -static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, - pp_atomctrl_voltage_table_entry *tab, - SMU71_Discrete_VoltageLevel *smc_voltage_tab) { - int result; - - - result = iceland_get_std_voltage_value_sidd(hwmgr, tab, - &smc_voltage_tab->StdVoltageHiSidd, - &smc_voltage_tab->StdVoltageLoSidd); - if (0 != result) { - smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; - smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; - } - - smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); - CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); - CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); - - return 0; -} - -/** - * Vddc table preparation for SMC. 
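Editorial note: the PP_HOST_TO_SMC_US()/CONVERT_FROM_HOST_TO_SMC_US() calls in iceland_populate_smc_voltage_table() above exist because the SMC consumes its tables in big-endian order, at least as far as the helper names and their definitions elsewhere in powerplay suggest; scaling is done in host order first, then the value is byte-swapped. A tiny sketch of that "scale first, swap last" pattern; host_to_smc_u16() is a stand-in for the driver macro, and the 0.25 mV unit is an assumption based on VOLTAGE_SCALE being applied to millivolt table entries.

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* assumed: mV -> 0.25 mV units for the SMC tables */

/* stand-in for PP_HOST_TO_SMC_US() on a little-endian host */
static uint16_t host_to_smc_u16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t vddc_mv = 1100;				/* host-order millivolts */
	uint16_t scaled  = (uint16_t)(vddc_mv * VOLTAGE_SCALE);	/* scale in host order */
	uint16_t wire    = host_to_smc_u16(scaled);		/* then swap for the SMC */

	printf("scaled 0x%04x -> wire 0x%04x\n", (unsigned)scaled, (unsigned)wire);
	return 0;
}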
- * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - unsigned int count; - int result; - - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - table->VddcLevelCount = data->vddc_voltage_table.count; - for (count = 0; count < table->VddcLevelCount; count++) { - result = iceland_populate_smc_voltage_table(hwmgr, - &data->vddc_voltage_table.entries[count], - &table->VddcLevel[count]); - PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); - - /* GPIO voltage control */ - if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) - table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; - else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) - table->VddcLevel[count].Smio = 0; - } - - CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); - - return 0; -} - -/** - * Vddci table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. - * @return 0 - */ -static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - int result; - uint32_t count; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - table->VddciLevelCount = data->vddci_voltage_table.count; - for (count = 0; count < table->VddciLevelCount; count++) { - result = iceland_populate_smc_voltage_table(hwmgr, - &data->vddci_voltage_table.entries[count], - &table->VddciLevel[count]); - PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL); - - /* GPIO voltage control */ - if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) - table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; - else - table->VddciLevel[count].Smio = 0; - } - - CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); - - return 0; -} - -/** - * Mvdd table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. 
- * @return 0 - */ -static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - int result; - uint32_t count; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - table->MvddLevelCount = data->mvdd_voltage_table.count; - for (count = 0; count < table->MvddLevelCount; count++) { - result = iceland_populate_smc_voltage_table(hwmgr, - &data->mvdd_voltage_table.entries[count], - &table->MvddLevel[count]); - PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL); - - /* GPIO voltage control */ - if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) - table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; - else - table->MvddLevel[count].Smio = 0; - } - - CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); - - return 0; -} - -int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) -{ - int i; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint8_t * hi_vid = data->power_tune_table.BapmVddCVidHiSidd; - uint8_t * lo_vid = data->power_tune_table.BapmVddCVidLoSidd; - - PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, - "The CAC Leakage table does not exist!", return -EINVAL); - PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, - "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); - PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, - "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { - for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { - lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); - hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); - } - } else { - PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL); - } - - return 0; -} - -int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) -{ - int i; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint8_t *vid = data->power_tune_table.VddCVid; - - PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, - "There should never be more than 8 entries for VddcVid!!!", - return -EINVAL); - - for (i = 0; i < (int)data->vddc_voltage_table.count; i++) { - vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); - } - - return 0; -} - -/** - * Preparation of voltage tables for SMC. 
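Editorial note: convert_to_vid(), used by iceland_populate_bapm_vddc_vid_sidd() and iceland_populate_vddc_vid() above, is not part of this hunk. In this driver family it maps a voltage in millivolts onto the SVI2 VID scale, where VID 0 corresponds to 1.55 V and each step is 6.25 mV; the constants 6200 and 25 below are those quantities expressed in 0.25 mV units. Treat the sketch as an assumption about that helper rather than a copy of it.

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* mV -> 0.25 mV units */

/* assumed SVI2 VID encoding: 1.55 V == VID 0, one step == 6.25 mV,
 * lower voltage => higher VID */
static uint8_t convert_to_vid(uint16_t vddc_mv)
{
	return (uint8_t)((6200 - (vddc_mv * VOLTAGE_SCALE)) / 25);
}

int main(void)
{
	printf("1.55 V -> VID %u\n", (unsigned)convert_to_vid(1550));	/* 0 */
	printf("1.00 V -> VID %u\n", (unsigned)convert_to_vid(1000));	/* 88 */
	return 0;
}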
- * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ - -int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - int result; - - result = iceland_populate_smc_vddc_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDC voltage table to SMC", return -1); - - result = iceland_populate_smc_vdd_ci_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDCI voltage table to SMC", return -1); - - result = iceland_populate_smc_mvdd_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate MVDD voltage table to SMC", return -1); - - return 0; -} - - -/** - * Re-generate the DPM level mask value - * @param hwmgr the address of the hardware manager - */ -static uint32_t iceland_get_dpm_level_enable_mask_value( - struct iceland_single_dpm_table * dpm_table) -{ - uint32_t i; - uint32_t mask_value = 0; - - for (i = dpm_table->count; i > 0; i--) { - mask_value = mask_value << 1; - - if (dpm_table->dpm_levels[i-1].enabled) - mask_value |= 0x1; - else - mask_value &= 0xFFFFFFFE; - } - return mask_value; -} - -int iceland_populate_memory_timing_parameters( - struct pp_hwmgr *hwmgr, - uint32_t engine_clock, - uint32_t memory_clock, - struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs - ) -{ - uint32_t dramTiming; - uint32_t dramTiming2; - uint32_t burstTime; - int result; - - result = atomctrl_set_engine_dram_timings_rv770(hwmgr, - engine_clock, memory_clock); - - PP_ASSERT_WITH_CODE(result == 0, - "Error calling VBIOS to set DRAM_TIMING.", return result); - - dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - - arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); - arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); - arb_regs->McArbBurstTime = (uint8_t)burstTime; - - return 0; -} - -/** - * Setup parameters for the MC ARB. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - * This function is to be called from the SetPowerState table. - */ -int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - int result = 0; - SMU71_Discrete_MCArbDramTimingTable arb_regs; - uint32_t i, j; - - memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable)); - - for (i = 0; i < data->dpm_table.sclk_table.count; i++) { - for (j = 0; j < data->dpm_table.mclk_table.count; j++) { - result = iceland_populate_memory_timing_parameters - (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, - data->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - - if (0 != result) { - break; - } - } - } - - if (0 == result) { - result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, - data->arb_table_start, - (uint8_t *)&arb_regs, - sizeof(SMU71_Discrete_MCArbDramTimingTable), - data->sram_end - ); - } - - return result; -} - -static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - struct iceland_dpm_table *dpm_table = &data->dpm_table; - uint32_t i; - - /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
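Editorial note: iceland_get_dpm_level_enable_mask_value() above walks the levels from the top down, shifting one bit in per level; the else branch that masks with 0xFFFFFFFE is a no-op because the freshly shifted-in bit is already zero. The net effect is simply "bit i is set iff level i is enabled". A sketch showing the driver-shaped loop next to the straightforward form, so the equivalence is easy to check.

#include <stdint.h>
#include <stdio.h>

struct level { int enabled; };

/* top-down variant, shaped like the driver loop */
static uint32_t mask_top_down(const struct level *lvl, unsigned count)
{
	uint32_t mask = 0;
	unsigned i;

	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (lvl[i - 1].enabled)
			mask |= 0x1;
	}
	return mask;
}

/* straightforward variant: set bit i when level i is enabled */
static uint32_t mask_bottom_up(const struct level *lvl, unsigned count)
{
	uint32_t mask = 0;
	unsigned i;

	for (i = 0; i < count; i++)
		if (lvl[i].enabled)
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	struct level lvl[] = { {1}, {0}, {1}, {1} };

	/* both print 0xd: levels 0, 2 and 3 enabled */
	printf("0x%x 0x%x\n", (unsigned)mask_top_down(lvl, 4),
	       (unsigned)mask_bottom_up(lvl, 4));
	return 0;
}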
*/ - for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = - (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = - 1; - table->LinkLevel[i].SPC = - (uint8_t)(data->pcie_spc_cap & 0xff); - table->LinkLevel[i].DownThreshold = - PP_HOST_TO_SMC_UL(5); - table->LinkLevel[i].UpThreshold = - PP_HOST_TO_SMC_UL(30); - } - - data->smc_state_table.LinkLevelCount = - (uint8_t)dpm_table->pcie_speed_table.count; - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); - - return 0; -} - -static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - return 0; -} - -uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, - uint32_t voltage) -{ - uint8_t count = (uint8_t) (voltage_table->count); - uint8_t i = 0; - - PP_ASSERT_WITH_CODE((NULL != voltage_table), - "Voltage Table empty.", return 0;); - PP_ASSERT_WITH_CODE((0 != count), - "Voltage Table empty.", return 0;); - - for (i = 0; i < count; i++) { - /* find first voltage bigger than requested */ - if (voltage_table->entries[i].value >= voltage) - return i; - } - - /* voltage is bigger than max voltage in the table */ - return i - 1; -} - -static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - return 0; -} - -static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - return 0; -} - -static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - return 0; -} - - -static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *tab) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) - tab->SVI2Enable |= VDDC_ON_SVI2; - - if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) - tab->SVI2Enable |= VDDCI_ON_SVI2; - else - tab->MergedVddci = 1; - - if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) - tab->SVI2Enable |= MVDD_ON_SVI2; - - PP_ASSERT_WITH_CODE( tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) && - (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL); - - return 0; -} - -static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, - struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, - uint32_t clock, uint32_t *vol) -{ - uint32_t i = 0; - - /* clock - voltage dependency table is empty table */ - if (allowed_clock_voltage_table->count == 0) - return -EINVAL; - - for (i = 0; i < allowed_clock_voltage_table->count; i++) { - /* find first sclk bigger than request */ - if (allowed_clock_voltage_table->entries[i].clk >= clock) { - *vol = allowed_clock_voltage_table->entries[i].v; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - *vol = allowed_clock_voltage_table->entries[i - 1].v; - - return 0; -} - -static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock, - bool strobe_mode) -{ - uint8_t mc_para_index; - - if (strobe_mode) { - if (memory_clock < 12500) { - mc_para_index = 0x00; - } else if (memory_clock > 47500) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); - } - } else { - if 
(memory_clock < 65000) { - mc_para_index = 0x00; - } else if (memory_clock > 135000) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); - } - } - - return mc_para_index; -} - -static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) -{ - uint8_t mc_para_index; - - if (memory_clock < 10000) { - mc_para_index = 0; - } else if (memory_clock >= 80000) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); - } - - return mc_para_index; -} - -static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, - uint32_t sclk, uint32_t *p_shed) -{ - unsigned int i; - - /* use the minimum phase shedding */ - *p_shed = 1; - - /* - * PPGen ensures the phase shedding limits table is sorted - * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. - * VBIOS ensures the phase shedding masks table is sorted from - * least phases enabled (phase shedding on) to most phases - * enabled (phase shedding off). - */ - for (i = 0; i < pl->count; i++) { - if (sclk < pl->entries[i].Sclk) { - /* Enable phase shedding */ - *p_shed = i; - break; - } - } - - return 0; -} - -static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, - uint32_t memory_clock, uint32_t *p_shed) -{ - unsigned int i; - - /* use the minimum phase shedding */ - *p_shed = 1; - - /* - * PPGen ensures the phase shedding limits table is sorted - * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. - * VBIOS ensures the phase shedding masks table is sorted from - * least phases enabled (phase shedding on) to most phases - * enabled (phase shedding off). - */ - for (i = 0; i < pl->count; i++) { - if (memory_clock < pl->entries[i].Mclk) { - /* Enable phase shedding */ - *p_shed = i; - break; - } - } - - return 0; -} - -/** - * Populates the SMC MCLK structure using the provided memory clock - * - * @param hwmgr the address of the hardware manager - * @param memory_clock the memory clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -static int iceland_calculate_mclk_params( - struct pp_hwmgr *hwmgr, - uint32_t memory_clock, - SMU71_Discrete_MemoryLevel *mclk, - bool strobe_mode, - bool dllStateOn - ) -{ - const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; - uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; - uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; - uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; - uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; - uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; - uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; - uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; - uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; - - pp_atomctrl_memory_clock_param mpll_param; - int result; - - result = atomctrl_get_memory_pll_dividers_si(hwmgr, - memory_clock, &mpll_param, strobe_mode); - PP_ASSERT_WITH_CODE(0 == result, - "Error retrieving Memory Clock Parameters from VBIOS.", return result); - - /* MPLL_FUNC_CNTL setup*/ - mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); - - /* MPLL_FUNC_CNTL_1 setup*/ - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, CLKF, 
mpll_param.mpll_fb_divider.cl_kf); - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); - - /* MPLL_AD_FUNC_CNTL setup*/ - mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, - MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); - - if (data->is_memory_GDDR5) { - /* MPLL_DQ_FUNC_CNTL setup*/ - mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, - MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); - mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, - MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { - /* - ************************************ - Fref = Reference Frequency - NF = Feedback divider ratio - NR = Reference divider ratio - Fnom = Nominal VCO output frequency = Fref * NF / NR - Fs = Spreading Rate - D = Percentage down-spread / 2 - Fint = Reference input frequency to PFD = Fref / NR - NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) - CLKS = NS - 1 = ISS_STEP_NUM[11:0] - NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) - CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] - ************************************* - */ - pp_atomctrl_internal_ss_info ss_info; - uint32_t freq_nom; - uint32_t tmp; - uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); - - /* for GDDR5 for all modes and DDR3 */ - if (1 == mpll_param.qdr) - freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); - else - freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); - - /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. 
reference_divider = 1*/ - tmp = (freq_nom / reference_clock); - tmp = tmp * tmp; - - if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { - /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ - /* ss.Info.speed_spectrum_rate -- in unit of khz */ - /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ - /* = reference_clock * 5 / speed_spectrum_rate */ - uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; - - /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ - /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ - uint32_t clkv = - (uint32_t)((((131 * ss_info.speed_spectrum_percentage * - ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); - - mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); - mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); - } - } - - /* MCLK_PWRMGT_CNTL setup */ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); - - - /* Save the result data to outpupt memory level structure */ - mclk->MclkFrequency = memory_clock; - mclk->MpllFuncCntl = mpll_func_cntl; - mclk->MpllFuncCntl_1 = mpll_func_cntl_1; - mclk->MpllFuncCntl_2 = mpll_func_cntl_2; - mclk->MpllAdFuncCntl = mpll_ad_func_cntl; - mclk->MpllDqFuncCntl = mpll_dq_func_cntl; - mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; - mclk->DllCntl = dll_cntl; - mclk->MpllSs1 = mpll_ss1; - mclk->MpllSs2 = mpll_ss2; - - return 0; -} - -static int iceland_populate_single_memory_level( - struct pp_hwmgr *hwmgr, - uint32_t memory_clock, - SMU71_Discrete_MemoryLevel *memory_level - ) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - int result = 0; - bool dllStateOn; - struct cgs_display_info info = {0}; - - - if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, - hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); - } - - if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) { - memory_level->MinVddci = memory_level->MinVddc; - } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, - hwmgr->dyn_state.vddci_dependency_on_mclk, - memory_clock, - &memory_level->MinVddci); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); - } - - if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, - hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result); - } - - memory_level->MinVddcPhases = 1; - - if (data->vddc_phase_shed_control) { - iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, - memory_clock, &memory_level->MinVddcPhases); - } - - memory_level->EnabledForThrottle = 1; - memory_level->EnabledForActivity = 1; - 
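Editorial note: the CLKS/CLKV spread-spectrum arithmetic earlier in this hunk is easier to sanity-check in isolation. The sketch below only re-runs the same integer expressions with made-up input values; it makes no claim about the physical units beyond what the driver comments state, and the numbers are purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative inputs only */
	uint32_t reference_clock = 2500;	/* same units as memory_clock */
	uint32_t memory_clock    = 80000;
	uint32_t post_divider    = 1;
	uint32_t ss_rate         = 30;		/* spread-spectrum rate, per the comments: kHz */
	uint32_t ss_percentage   = 200;		/* 2.00 %, stored in units of 0.01 % */

	/* nominal VCO output for the non-QDR case in the driver code */
	uint32_t freq_nom = memory_clock * 2 * (1 << post_divider);

	/* (freq_nom / Fref)^2 term from the comment block */
	uint32_t tmp = (freq_nom / reference_clock) * (freq_nom / reference_clock);

	/* CLKS = Fref * 5 / rate;  CLKV = 131 * pct * rate / 100 * tmp / freq_nom */
	uint32_t clks = reference_clock * 5 / ss_rate;
	uint32_t clkv = (uint32_t)((((131ULL * ss_percentage * ss_rate) / 100) * tmp) / freq_nom);

	printf("CLKS = %u, CLKV = %u\n", (unsigned)clks, (unsigned)clkv);
	return 0;
}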
memory_level->UpHyst = 0; - memory_level->DownHyst = 100; - memory_level->VoltageDownHyst = 0; - - /* Indicates maximum activity level for this performance level.*/ - memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - memory_level->StutterEnable = 0; - memory_level->StrobeEnable = 0; - memory_level->EdcReadEnable = 0; - memory_level->EdcWriteEnable = 0; - memory_level->RttEnable = 0; - - /* default set to low watermark. Highest level will be set to high later.*/ - memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; - - //if ((data->mclk_stutter_mode_threshold != 0) && - // (memory_clock <= data->mclk_stutter_mode_threshold) && - // (data->is_uvd_enabled == 0) - // && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) - // && (data->display_timing.num_existing_displays <= 2) - // && (data->display_timing.num_existing_displays != 0)) - // memory_level->StutterEnable = 1; - - /* decide strobe mode*/ - memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && - (memory_clock <= data->mclk_strobe_mode_threshold); - - /* decide EDC mode and memory clock ratio*/ - if (data->is_memory_GDDR5) { - memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock, - memory_level->StrobeEnable); - - if ((data->mclk_edc_enable_threshold != 0) && - (memory_clock > data->mclk_edc_enable_threshold)) { - memory_level->EdcReadEnable = 1; - } - - if ((data->mclk_edc_wr_enable_threshold != 0) && - (memory_clock > data->mclk_edc_wr_enable_threshold)) { - memory_level->EdcWriteEnable = 1; - } - - if (memory_level->StrobeEnable) { - if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >= - ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; - } else { - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; - } - - } else { - dllStateOn = data->dll_defaule_on; - } - } else { - memory_level->StrobeRatio = - iceland_get_ddr3_mclk_frequency_ratio(memory_clock); - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; - } - - result = iceland_calculate_mclk_params(hwmgr, - memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); - - if (0 == result) { - memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); - memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); - memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); - /* MCLK frequency in units of 10KHz*/ - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); - /* Indicates maximum activity level for this performance level.*/ - CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); - } - - return result; -} - -/** - * Populates the SMC MVDD structure using the provided memory clock. - * - * @param hwmgr the address of the hardware manager - * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. - * @param voltage the SMC VOLTAGE structure to be populated - */ -int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage) -{ - const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint32_t i = 0; - - if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) { - /* find mvdd value which clock is more than request */ - for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { - if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { - /* Always round to higher voltage. */ - voltage->Voltage = data->mvdd_voltage_table.entries[i].value; - break; - } - } - - PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, - "MVDD Voltage is outside the supported range.", return -1); - - } else { - return -1; - } - - return 0; -} - - -static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - int result = 0; - const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - pp_atomctrl_clock_dividers_vi dividers; - SMU71_Discrete_VoltageLevel voltage_level; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; - uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; - uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; - - /* The ACPI state should not do DPM on DC (or ever).*/ - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - if (data->acpi_vddc) - table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); - else - table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE); - - table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 
0 : 1; - - /* assign zero for now*/ - table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); - - /* get the engine clock dividers for this clock value*/ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, - table->ACPILevel.SclkFrequency, ÷rs); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", return result); - - /* divider ID for required SCLK*/ - table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; - table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - table->ACPILevel.DeepSleepDivId = 0; - - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); - spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, - CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); - - table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; - table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; - table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - - /* For various features to be enabled/disabled while this level is active.*/ - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); - /* SCLK frequency in units of 10KHz*/ - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); - - table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; - table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; - - /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ - - if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level)) - table->MemoryACPILevel.MinMvdd = - PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); - else - table->MemoryACPILevel.MinMvdd = 0; - - /* Force reset on DLL*/ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); - - /* Disable DLL in ACPIState*/ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); - - /* Enable DLL bypass signal*/ - dll_cntl = PHM_SET_FIELD(dll_cntl, - DLL_CNTL, MRDCK0_BYPASS, 0); - dll_cntl = PHM_SET_FIELD(dll_cntl, - DLL_CNTL, MRDCK1_BYPASS, 0); - - table->MemoryACPILevel.DllCntl = - PP_HOST_TO_SMC_UL(dll_cntl); - table->MemoryACPILevel.MclkPwrmgtCntl = - PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); - table->MemoryACPILevel.MpllAdFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); - table->MemoryACPILevel.MpllDqFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); - table->MemoryACPILevel.MpllFuncCntl = - 
PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); - table->MemoryACPILevel.MpllFuncCntl_1 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); - table->MemoryACPILevel.MpllFuncCntl_2 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); - table->MemoryACPILevel.MpllSs1 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); - table->MemoryACPILevel.MpllSs2 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpHyst = 0; - table->MemoryACPILevel.DownHyst = 100; - table->MemoryACPILevel.VoltageDownHyst = 0; - /* Indicates maximum activity level for this performance level.*/ - table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); - - table->MemoryACPILevel.StutterEnable = 0; - table->MemoryACPILevel.StrobeEnable = 0; - table->MemoryACPILevel.EdcReadEnable = 0; - table->MemoryACPILevel.EdcWriteEnable = 0; - table->MemoryACPILevel.RttEnable = 0; - - return result; -} - -static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level) -{ - int result = 0; - uint32_t i; - - for (i = 0; i < table->count; i++) { - if (value == table->dpm_levels[i].value) { - *boot_level = i; - result = 0; - } - } - return result; -} - -/** - * Calculates the SCLK dividers using the provided engine clock - * - * @param hwmgr the address of the hardware manager - * @param engine_clock the engine clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr, - uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk) -{ - const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - pp_atomctrl_clock_dividers_vi dividers; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t reference_clock; - uint32_t reference_divider; - uint32_t fbdiv; - int result; - - /* get the engine clock dividers for this clock value*/ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, ÷rs); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", return result); - - /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ - reference_clock = atomctrl_get_reference_clock(hwmgr); - - reference_divider = 1 + dividers.uc_pll_ref_div; - - /* low 14 bits is fraction and high 12 bits is divider*/ - fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; - - /* SPLL_FUNC_CNTL setup*/ - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); - - /* SPLL_FUNC_CNTL_3 setup*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, - CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); - - /* set to use fractional accumulation*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, - CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { - 
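Editorial note: the FBDIV handling in iceland_calculate_sclk_params() above takes a fixed-point feedback divider straight from the VBIOS dividers; per the in-code comment the low 14 bits are fractional, so the value roughly corresponds to target * refdiv * postdiv * 16384 / Fref. The sketch below computes that inverse directly. It is not the path the driver takes (the driver calls atomctrl_get_engine_pll_dividers_vi()), and the input values are illustrative.

#include <stdint.h>
#include <stdio.h>

/* engine_clock and ref_clock in the same units; returns a 12.14 fixed-point
 * feedback divider, mirroring the comment that the low 14 bits of
 * SPLL_FB_DIV are the fractional part */
static uint32_t compute_fbdiv(uint32_t engine_clock, uint32_t ref_clock,
			      uint32_t ref_div, uint32_t post_div)
{
	uint64_t fb = (uint64_t)engine_clock * ref_div * post_div * 16384ULL;

	return (uint32_t)(fb / ref_clock) & 0x3FFFFFF;	/* 26 significant bits */
}

int main(void)
{
	/* illustrative: 600 MHz target, 25 MHz reference, refdiv 1, postdiv 2
	 * (all expressed in 10 kHz units, as the driver does for clocks) */
	uint32_t fbdiv = compute_fbdiv(60000, 2500, 1, 2);

	printf("fbdiv = %u (integer part %u, fraction %u/16384)\n",
	       (unsigned)fbdiv, (unsigned)(fbdiv >> 14), (unsigned)(fbdiv & 0x3FFF));
	return 0;
}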
pp_atomctrl_internal_ss_info ss_info; - - uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; - if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { - /* - * ss_info.speed_spectrum_percentage -- in unit of 0.01% - * ss_info.speed_spectrum_rate -- in unit of khz - */ - /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ - uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); - - /* clkv = 2 * D * fbdiv / NS */ - uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); - - cg_spll_spread_spectrum = - PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); - cg_spll_spread_spectrum = - PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); - cg_spll_spread_spectrum_2 = - PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); - } - } - - sclk->SclkFrequency = engine_clock; - sclk->CgSpllFuncCntl3 = spll_func_cntl_3; - sclk->CgSpllFuncCntl4 = spll_func_cntl_4; - sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; - sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; - sclk->SclkDid = (uint8_t)dividers.pll_post_divider; - - return 0; -} - -static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr, - uint32_t engine_clock, uint32_t min_engine_clock_in_sr) -{ - uint32_t i, temp; - uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ? - min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK; - - PP_ASSERT_WITH_CODE((engine_clock >= min), - "Engine clock can't satisfy stutter requirement!", return 0); - - for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) { - temp = engine_clock / (1 << i); - - if(temp >= min || i == 0) - break; - } - return (uint8_t)i; -} - -/** - * Populates single SMC SCLK structure using the provided engine clock - * - * @param hwmgr the address of the hardware manager - * @param engine_clock the engine clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, - uint32_t engine_clock, uint16_t sclk_activity_level_threshold, - SMU71_Discrete_GraphicsLevel *graphic_level) -{ - int result; - uint32_t threshold; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); - - - /* populate graphics levels*/ - result = iceland_get_dependecy_volt_by_clk(hwmgr, - hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc); - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for VDDC engine clock dependency table", return result); - - /* SCLK frequency in units of 10KHz*/ - graphic_level->SclkFrequency = engine_clock; - - /* - * Minimum VDDC phases required to support this level, it - * should get from dependence table. - */ - graphic_level->MinVddcPhases = 1; - - if (data->vddc_phase_shed_control) { - iceland_populate_phase_value_based_on_sclk(hwmgr, - hwmgr->dyn_state.vddc_phase_shed_limits_table, - engine_clock, - &graphic_level->MinVddcPhases); - } - - /* Indicates maximum activity level for this performance level. 
50% for now*/ - graphic_level->ActivityLevel = sclk_activity_level_threshold; - - graphic_level->CcPwrDynRm = 0; - graphic_level->CcPwrDynRm1 = 0; - /* this level can be used if activity is high enough.*/ - graphic_level->EnabledForActivity = 1; - /* this level can be used for throttling.*/ - graphic_level->EnabledForThrottle = 1; - graphic_level->UpHyst = 0; - graphic_level->DownHyst = 100; - graphic_level->VoltageDownHyst = 0; - graphic_level->PowerThrottle = 0; - - threshold = engine_clock * data->fast_watermark_threshold / 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - graphic_level->DeepSleepDivId = - iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock, - data->display_timing.min_clock_insr); - } - - /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ - graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - if (0 == result) { - graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE); - /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); - } - - return result; -} - -/** - * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states - * - * @param hwmgr the address of the hardware manager - */ -static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - struct iceland_dpm_table *dpm_table = &data->dpm_table; - int result = 0; - uint32_t level_array_adress = data->dpm_table_start + - offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); - - uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS; - SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; - uint32_t i; - uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; - memset(levels, 0x00, level_array_size); - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - result = iceland_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &(data->smc_state_table.GraphicsLevel[i])); - if (0 != result) - return result; - - /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. 
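Editorial note: iceland_get_sleep_divider_id_from_clock(), shown a little earlier, picks the largest power-of-two divider that still keeps the divided engine clock at or above the self-refresh minimum. A standalone version of that search; the divider-id bound of 5 (divide by up to 32) is assumed to match the ICELAND_MAX_DEEPSLEEP_DIVIDER_ID constant, which is not defined in this hunk.

#include <stdint.h>
#include <stdio.h>

#define MAX_DEEPSLEEP_DIVIDER_ID 5	/* assumed bound: divide by at most 2^5 */

/* return the largest divider id d such that (clock >> d) still meets
 * min_clock; 0 if only the undivided clock qualifies (the driver asserts
 * when even that fails) */
static uint8_t sleep_divider_id(uint32_t engine_clock, uint32_t min_clock)
{
	uint32_t i;

	if (engine_clock < min_clock)
		return 0;

	for (i = MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--)
		if ((engine_clock >> i) >= min_clock)
			break;
	return (uint8_t)i;
}

int main(void)
{
	/* 600 MHz engine clock, 80 MHz floor (10 kHz units): 600/8 < 80, so /4 wins */
	printf("divider id = %u\n", (unsigned)sleep_divider_id(60000, 8000));	/* -> 2 */
	return 0;
}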
*/ - if (i > 1) - data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; - } - - /* set highest level watermark to high */ - if (dpm_table->sclk_table.count > 1) - data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (highest_pcie_level_enabled + 1))) != 0) { - highest_pcie_level_enabled++; - } - - while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << lowest_pcie_level_enabled)) == 0) { - lowest_pcie_level_enabled++; - } - - while ((count < highest_pcie_level_enabled) && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) { - count++; - } - - mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? - (lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled; - - /* set pcieDpmLevel to highest_pcie_level_enabled*/ - for (i = 2; i < dpm_table->sclk_table.count; i++) { - data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; - } - - /* set pcieDpmLevel to lowest_pcie_level_enabled*/ - data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled*/ - data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; - - /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); - - if (0 != result) - return result; - - return 0; -} - -/** - * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states - * - * @param hwmgr the address of the hardware manager - */ - -static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - struct iceland_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel); - uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY; - SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel; - uint32_t i; - - memset(levels, 0x00, level_array_size); - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", return -1); - result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, - &(data->smc_state_table.MemoryLevel[i])); - if (0 != result) { - return result; - } - } - - /* Only enable level 0 for now.*/ - data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; - - /* - * in order to prevent MC activity from stutter mode to push DPM up. - * the UVD change complements this by putting the MCLK in a higher state - * by default such that we are not effected by up threshold or and MCLK DPM latency. 
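Editorial note: the while-loops above are bit scans over pcie_dpm_enable_mask: the lowest set bit gives the lowest usable PCIe level, the end of the run of set bits starting at bit 0 gives the highest, and the "mid" level is the next enabled level above the lowest, clamped to the highest. The driver's highest-level loop assumes the mask has no holes, which holds for the table built earlier in this patch where every level is enabled. A compact sketch of the same scans:

#include <stdint.h>
#include <stdio.h>

/* index of the lowest set bit (mask must be non-zero) */
static unsigned lowest_enabled(uint32_t mask)
{
	unsigned i = 0;

	while (!(mask & (1u << i)))
		i++;
	return i;
}

/* end of the run of set bits starting at bit 0; matches the driver loop,
 * which assumes the enable mask is contiguous */
static unsigned highest_enabled(uint32_t mask)
{
	unsigned i = 0;

	while (mask & (1u << (i + 1)))
		i++;
	return i;
}

int main(void)
{
	uint32_t pcie_mask = 0x3f;	/* all six hard-coded PCIe levels enabled */
	unsigned lo  = lowest_enabled(pcie_mask);
	unsigned hi  = highest_enabled(pcie_mask);
	unsigned mid = (lo + 1 < hi) ? lo + 1 : hi;	/* next level above lowest, clamped */

	printf("lowest %u, mid %u, highest %u\n", lo, mid, hi);	/* 0 1 5 */
	return 0;
}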
- */ - data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - /* set highest level watermark to high*/ - data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; - - /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, - level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); - - if (0 != result) { - return result; - } - - return 0; -} - -struct ICELAND_DLL_SPEED_SETTING -{ - uint16_t Min; /* Minimum Data Rate*/ - uint16_t Max; /* Maximum Data Rate*/ - uint32_t dll_speed; /* The desired DLL_SPEED setting*/ -}; - -static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate) -{ - int result = 0; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint32_t voltage_response_time, ulv_voltage; - - pstate->CcPwrDynRm = 0; - pstate->CcPwrDynRm1 = 0; - - //backbiasResponseTime is use for ULV state voltage value. - result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); - PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); - - if(!ulv_voltage) { - data->ulv.ulv_supported = false; - return 0; - } - - if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) { - /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ - if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) { - pstate->VddcOffset = 0; - } - else { - /* used in SMIO Mode. not implemented for now. this is backup only for CI. */ - pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); - } - } else { - /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ - if(ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) { - pstate->VddcOffsetVid = 0; - } else { - /* used in SVI2 Mode */ - pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - } - } - - /* used in SVI2 Mode to shed phase */ - pstate->VddcPhase = (data->vddc_phase_shed_control) ? 
0 : 1; - - if (0 == result) { - CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1); - CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset); - } - - return result; -} - -static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv) -{ - return iceland_populate_ulv_level(hwmgr, ulv); -} - -static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint8_t count, level; - - count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); - - for (level = 0; level < count; level++) { - if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk - >= data->vbios_boot_state.sclk_bootup_value) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); - - for (level = 0; level < count; level++) { - if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk - >= data->vbios_boot_state.mclk_bootup_value) { - data->smc_state_table.MemoryBootLevel = level; - break; - } - } - - return 0; -} - -/** - * Initializes the SMC table and uploads it - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pInput the pointer to input data (PowerState) - * @return always 0 - */ -static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - SMU71_Discrete_DpmTable *table = &(data->smc_state_table); - const struct phw_iceland_ulv_parm *ulv = &(data->ulv); - - result = iceland_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result;); - memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); - - if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) { - iceland_populate_smc_voltage_tables(hwmgr, table); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - } - - if (data->is_memory_GDDR5) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - } - - if (ulv->ulv_supported) { - result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result;); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); - } - - result = iceland_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result;); - - result = iceland_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result;); - - result = iceland_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Memory Level!", return result;); - - result = iceland_populate_smc_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result;); - - result = iceland_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result;); - - result = iceland_populate_smc_acp_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACP Level!", return result;); - - result = 
iceland_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result;); - - /* - * Since only the initial state is completely set up at this - * point (the other states are just copies of the boot state) - * we only need to populate the ARB settings for the initial - * state. - */ - result = iceland_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result;); - - result = iceland_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result;); - - table->GraphicsBootLevel = 0; - table->MemoryBootLevel = 0; - - /* find boot level from dpm table */ - result = iceland_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); - - if (result) - pr_warning("VBIOS did not find boot engine clock value in dependency table.\n"); - - result = iceland_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); - - if (result) - pr_warning("VBIOS did not find boot memory clock value in dependency table.\n"); - - table->BootVddc = data->vbios_boot_state.vddc_bootup_value; - if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) { - table->BootVddci = table->BootVddc; - } - else { - table->BootVddci = data->vbios_boot_state.vddci_bootup_value; - } - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; - - result = iceland_populate_smc_initial_state(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); - - result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); - - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - (data->thermal_temp_setting.temperature_high * - ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - table->TemperatureLimitLow = - (data->thermal_temp_setting.temperature_low * - ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; - table->PCIeGenInterval = 1; - - result = iceland_populate_smc_svi2_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate SVI2 setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); - CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); - CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); - CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); - - table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); - table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * 
VOLTAGE_SCALE); - table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); - - /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + - offsetof(SMU71_Discrete_DpmTable, SystemFlags), - (uint8_t *)&(table->SystemFlags), - sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController), - data->sram_end); - - PP_ASSERT_WITH_CODE(0 == result, - "Failed to upload dpm data to SMC memory!", return result); - - /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, - data->ulv_settings_start, - (uint8_t *)&(data->ulv_setting), - sizeof(SMU71_Discrete_Ulv), - data->sram_end); - -#if 0 - /* Notify SMC to follow new GPIO scheme */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) { - if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme)) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); - } -#endif - - return result; -} - -int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) -{ - const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - uint32_t i, j; - - for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) { - if (data->iceland_mc_reg_table.validflag & 1<<j) { - PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE, - "Index of mc_reg_table->address[] array out of boundary", return -1); - mc_reg_table->address[i].s0 = - PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0); - mc_reg_table->address[i].s1 = - PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1); - i++; - } - } - - mc_reg_table->last = (uint8_t)i; - - return 0; -} - -/* convert register values from driver to SMC format */ -void iceland_convert_mc_registers( - const phw_iceland_mc_reg_entry * pEntry, - SMU71_Discrete_MCRegisterSet *pData, - uint32_t numEntries, uint32_t validflag) -{ - uint32_t i, j; - - for (i = 0, j = 0; j < numEntries; j++) { - if (validflag & 1<<j) { - pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]); - i++; - } - } -} - -/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */ -int iceland_convert_mc_reg_table_entry_to_smc( - struct pp_hwmgr *hwmgr, - const uint32_t memory_clock, - SMU71_Discrete_MCRegisterSet *mc_reg_table_data - ) -{ - const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t i = 0; - - for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) { - if (memory_clock <= - data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) { - break; - } - } - - if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0)) - --i; - - iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i], - mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag); - - return 0; -} - -int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, - SMU71_Discrete_MCRegisters *mc_reg_table) -{ - int result = 0; - iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - int res; - uint32_t i; - - for (i = 0; i < data->dpm_table.mclk_table.count; i++) { - res = iceland_convert_mc_reg_table_entry_to_smc( - hwmgr, - data->dpm_table.mclk_table.dpm_levels[i].value, - &mc_reg_table->data[i] - ); - - if (0 != res) - result = res; - } - - return result; -} - -int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct iceland_hwmgr *data = (struct 
iceland_hwmgr *)(hwmgr->backend); - - memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters)); - result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize MCRegTable for the MC register addresses!", return result;); - - result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize MCRegTable for driver state!", return result;); - - return smu7_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, - (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end); -} - -int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) -{ - PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; -} - -int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); - - return 0; -} - -int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* enable SCLK dpm */ - if (0 == data->sclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - } - - /* enable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, 0x100005);/*Read */ - - udelay(10); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, 0x500005);/* write */ - - } - - return 0; -} - -int iceland_start_dpm(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* enable general power management */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); - /* enable sclk deep sleep */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); - - /* prepare for PCIE DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); - - if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) { - PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); - } - - /* enable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start 
Function!", - return -1 - ); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableACDCGPIOInterrupt); - } - - return 0; -} - -static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr, - uint32_t sources) -{ - bool protection; - enum DPM_EVENT_SRC src; - - switch (sources) { - default: - printk(KERN_ERR "Unknown throttling event sources."); - /* fall through */ - case 0: - protection = false; - /* src is unused */ - break; - case (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL; - break; - case (1 << PHM_AutoThrottleSource_External): - protection = true; - src = DPM_EVENT_SRC_EXTERNAL; - break; - case (1 << PHM_AutoThrottleSource_External) | - (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; - break; - } - /* Order matters - don't enable thermal protection for the wrong source. */ - if (protection) { - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, - DPM_EVENT_SRC, src); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, - !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)); - } else - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, 1); -} - -static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - if (!(data->active_auto_throttle_sources & (1 << source))) { - data->active_auto_throttle_sources |= 1 << source; - iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - - -/** -* Programs the Deep Sleep registers -* -* @param pHwMgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data (PhwEvergreen_DisplayConfiguration) -* @param pOutput the pointer to output data (unused) -* @param pStorage the pointer to temporary storage (unused) -* @param Result the last failure code (unused) -* @return always 0 -*/ -static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_ON) != 0) - PP_ASSERT_WITH_CODE(false, - "Attempt to enable Master Deep Sleep switch failed!", - return -EINVAL); - } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF) != 0) - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -EINVAL); - } - - return 0; -} - -static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - if (cf_iceland_voltage_control(hwmgr)) { - tmp_result = iceland_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable voltage control!", return tmp_result); - - tmp_result = iceland_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", return tmp_result); - } - - tmp_result = iceland_initialize_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize MC reg table!", return tmp_result); - - tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", return tmp_result); - - tmp_result = iceland_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", return tmp_result); - - tmp_result = iceland_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", return tmp_result); - - tmp_result = iceland_upload_firmware(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to upload firmware header!", return tmp_result); - - tmp_result = iceland_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", return tmp_result); - - tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", return tmp_result); - - tmp_result = iceland_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", return tmp_result); - - tmp_result = iceland_populate_initial_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate initialize MC Reg table!", return tmp_result); - - tmp_result = iceland_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate PM fuses!", return tmp_result); - - - /* enable SCLK control */ - tmp_result = iceland_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", return tmp_result); - - tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to enable deep sleep!", return tmp_result); - - /* enable DPM */ - tmp_result = iceland_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", return tmp_result); - - tmp_result = iceland_enable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SMC CAC!", return tmp_result); - - 
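The enable sequence above is built around the PP_ASSERT_WITH_CODE(cond, msg, action) idiom: each step runs, and when its result is non-zero the macro logs the message and executes the recovery action, typically an early return that aborts the rest of the sequence. A minimal user-space sketch of that pattern, with printf standing in for kernel logging and hypothetical step functions in place of the real iceland_* helpers:

#include <stdio.h>

/* Log a message and run a recovery action when a condition fails --
 * a simplified stand-in for the driver's PP_ASSERT_WITH_CODE macro. */
#define ASSERT_WITH_CODE(cond, msg, action)        \
	do {                                       \
		if (!(cond)) {                     \
			printf("%s\n", (msg));     \
			action;                    \
		}                                  \
	} while (0)

/* Hypothetical setup steps; each returns 0 on success. */
static int enable_voltage_control(void) { return 0; }
static int upload_firmware(void)        { return 0; }
static int start_dpm(void)              { return 0; }

static int enable_dpm_tasks(void)
{
	int tmp;

	tmp = enable_voltage_control();
	ASSERT_WITH_CODE(tmp == 0, "Failed to enable voltage control!", return tmp);

	tmp = upload_firmware();
	ASSERT_WITH_CODE(tmp == 0, "Failed to upload firmware!", return tmp);

	tmp = start_dpm();
	ASSERT_WITH_CODE(tmp == 0, "Failed to start DPM!", return tmp);

	return 0;
}

int main(void)
{
	return enable_dpm_tasks();
}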
tmp_result = iceland_enable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable power containment!", return tmp_result); - - tmp_result = iceland_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to power control set level!", result = tmp_result); - - tmp_result = iceland_enable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable thermal auto throttle!", result = tmp_result); - - return result; -} - -static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct phw_iceland_ulv_parm *ulv; - - ulv = &data->ulv; - ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT; - data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7; - - data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT; - data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ABM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicACTiming); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMemoryTransition); - - iceland_initialize_power_tune_defaults(hwmgr); - - data->mclk_strobe_mode_threshold = 40000; - data->mclk_stutter_mode_threshold = 30000; - data->mclk_edc_enable_threshold = 40000; - data->mclk_edc_wr_enable_threshold = 40000; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMCLS); - - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification); -} - -static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - uint16_t virtual_voltage_id; - uint16_t vddc = 0; - uint16_t i; - - /* the count indicates actual number of entries */ - data->vddc_leakage.count = 0; - data->vddci_leakage.count = 0; - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { - pr_err("Iceland should always support EVV\n"); - return -EINVAL; - } - - /* retrieve voltage for leakage ID (0xff01 + i) */ - for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) { - virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - - PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)), - "Error retrieving EVV voltage value!\n", continue); - - if (vddc >= 
2000) - pr_warning("Invalid VDDC value!\n"); - - if (vddc != 0 && vddc != virtual_voltage_id) { - data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; - data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; - data->vddc_leakage.count++; - } - } - - return 0; -} - -static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr, - uint32_t *vddc) -{ - iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t leakage_index; - struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { - /* - * If this voltage matches a leakage voltage ID, patch - * with actual leakage voltage. - */ - if (leakage_table->leakage_id[leakage_index] == *vddc) { - /* - * Need to make sure vddc is less than 2v or - * else, it could burn the ASIC. - */ - if (leakage_table->actual_voltage[leakage_index] >= 2000) - pr_warning("Invalid VDDC value!\n"); - *vddc = leakage_table->actual_voltage[leakage_index]; - /* we found leakage voltage */ - break; - } - } - - if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0) - pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); -} - -static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr, - uint32_t *vddci) -{ - iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t leakage_index; - struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { - /* - * If this voltage matches a leakage voltage ID, patch - * with actual leakage voltage. - */ - if (leakage_table->leakage_id[leakage_index] == *vddci) { - *vddci = leakage_table->actual_voltage[leakage_index]; - /* we found leakage voltage */ - break; - } - } - - if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0) - pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); -} - -static int iceland_patch_vddc(struct pp_hwmgr *hwmgr, - struct phm_clock_voltage_dependency_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); - - return 0; -} - -static int iceland_patch_vddci(struct pp_hwmgr *hwmgr, - struct phm_clock_voltage_dependency_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v); - - return 0; -} - -static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr, - struct phm_vce_clock_voltage_dependency_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); - - return 0; -} - - -static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr, - struct phm_uvd_clock_voltage_dependency_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); - - return 0; -} - -static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, - struct phm_phase_shedding_limits_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage); - - return 0; -} - -static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr, - struct phm_samu_clock_voltage_dependency_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - 
iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); - - return 0; -} - -static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr, - struct phm_acp_clock_voltage_dependency_table *tab) -{ - uint16_t i; - - if (tab) - for (i = 0; i < tab->count; i++) - iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); - - return 0; -} - -static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr, - struct phm_clock_and_voltage_limits *tab) -{ - if (tab) { - iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc); - iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci); - } - - return 0; -} - -static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) -{ - uint32_t i; - uint32_t vddc; - - if (tab) { - for (i = 0; i < tab->count; i++) { - vddc = (uint32_t)(tab->entries[i].Vddc); - iceland_patch_with_vddc_leakage(hwmgr, &vddc); - tab->entries[i].Vddc = (uint16_t)vddc; - } - } - - return 0; -} - -static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) -{ - int tmp; - - tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); - if(tmp) - return -EINVAL; - - tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); - if(tmp) - return -EINVAL; - - return 0; -} - -static int iceland_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; - struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; - struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, - "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, - "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL); - - PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, - "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
This table is mandatory\n", return -EINVAL); - - data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v; - data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = - allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = - allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = - allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; - - if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { - data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v; - data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; - } - - if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; - - return 0; -} - -static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) -{ - uint32_t table_size; - struct phm_clock_voltage_dependency_table *table_clk_vlt; - - hwmgr->dyn_state.mclk_sclk_ratio = 4; - hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */ - hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */ - - /* initialize vddc_dep_on_dal_pwrl table */ - table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); - - if (NULL == table_clk_vlt) { - pr_err("[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); - return -ENOMEM; - } else { - table_clk_vlt->count = 4; - table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; - table_clk_vlt->entries[0].v = 0; - table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; - table_clk_vlt->entries[1].v = 720; - table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; - table_clk_vlt->entries[2].v = 810; - table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; - table_clk_vlt->entries[3].v = 900; - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; - } - - return 0; -} - -/** - * Initializes the Volcanic Islands Hardware Manager - * - * @param hwmgr the address of the powerplay hardware manager. - * @return 1 if success; otherwise appropriate error code. 
- */ -static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - int result = 0; - SMU71_Discrete_DpmTable *table = NULL; - iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - bool stay_in_boot; - struct phw_iceland_ulv_parm *ulv; - struct cgs_system_info sys_info = {0}; - - PP_ASSERT_WITH_CODE((NULL != hwmgr), - "Invalid Parameter!", return -EINVAL;); - - data->dll_defaule_on = 0; - data->sram_end = SMC_RAM_END; - - data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT; - data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT; - - data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT; - - data->sclk_dpm_key_disabled = 0; - data->mclk_dpm_key_disabled = 0; - data->pcie_dpm_key_disabled = 0; - data->pcc_monitor_enabled = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - - data->gpio_debug = 0; - data->engine_clock_data = 0; - data->memory_clock_data = 0; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleepAboveLow); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - /* Initializes DPM default values. */ - iceland_initialize_dpm_defaults(hwmgr); - - /* Enable Platform EVV support. */ - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EVV); - - /* Get leakage voltage based on leakage ID. */ - result = iceland_get_evv_voltage(hwmgr); - if (result) - goto failed; - - /** - * Patch our voltage dependency table with actual leakage - * voltage. We need to perform leakage translation before it's - * used by other functions such as - * iceland_set_hwmgr_variables_based_on_pptable. - */ - result = iceland_patch_dependency_tables_with_leakage(hwmgr); - if (result) - goto failed; - - /* Parse pptable data read from VBIOS. */ - result = iceland_set_private_var_based_on_pptale(hwmgr); - if (result) - goto failed; - - /* ULV support */ - ulv = &(data->ulv); - ulv->ulv_supported = 1; - - /* Initalize Dynamic State Adjustment Rule Settings*/ - result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - if (result) { - pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n"); - goto failed; - } - - data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE; - data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE; - data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE; - - /* - * Hardcode thermal temperature settings for now, these will - * be overwritten if a custom policy exists. 
- */ - data->thermal_temp_setting.temperature_low = 99500; - data->thermal_temp_setting.temperature_high = 100000; - data->thermal_temp_setting.temperature_shutdown = 104000; - data->uvd_enabled = false; - - table = &data->smc_state_table; - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, - &gpio_pin_assignment)) { - table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } else { - table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin_assignment)) { - table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - /* - * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak. - * Current Control feature is enabled and we should program - * PCC HW register - */ - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, - &gpio_pin_assignment)) { - uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, - ixCNB_PWRMGT_CNTL); - - switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { - case 0: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); - break; - case 1: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); - break; - case 2: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); - break; - case 3: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); - break; - case 4: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); - break; - default: - pr_warning("[ powerplay ] Failed to setup PCC HW register! 
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n"); - break; - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCNB_PWRMGT_CNTL, temp_reg); - } - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMU7); - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, - VOLTAGE_OBJ_GPIO_LUT)) - data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, - VOLTAGE_OBJ_SVID2)) - data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, - VOLTAGE_OBJ_GPIO_LUT)) - data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, - VOLTAGE_OBJ_SVID2)) - data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; - } - - if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, - VOLTAGE_OBJ_GPIO_LUT)) - data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, - VOLTAGE_OBJ_SVID2)) - data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; - } - - if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - - data->vddc_phase_shed_control = false; - - stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StayInBootState); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPowerManagement); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ActivityReporting); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_GFXClockGatingSupport); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MemorySpreadSpectrumSupport); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPCIEGen2Support); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMC); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisablePowerGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_BACO); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalAutoThrottling); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableLSClockGating); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuDPM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AcpDPM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6inACSupport); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnablePlatformPowerManagement); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PauseMMSessions); - - 
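The block above toggles feature bits with phm_cap_set()/phm_cap_unset(), and later code gates behavior on phm_cap_enabled(); conceptually, platformCaps is a capability bitmap keyed by the PHM_PlatformCaps_* enumerators. A rough sketch of such a bitmap, simplified to a single 64-bit word and using made-up capability names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative capability IDs; the driver defines many more. */
enum platform_caps {
	CAP_REGULATOR_HOT,
	CAP_AUTOMATIC_DC_TRANSITION,
	CAP_THERMAL_AUTO_THROTTLE,
};

/* One 64-bit word is enough for this sketch. */
static uint64_t caps;

static void cap_set(enum platform_caps c)     { caps |=  (UINT64_C(1) << c); }
static void cap_unset(enum platform_caps c)   { caps &= ~(UINT64_C(1) << c); }
static bool cap_enabled(enum platform_caps c) { return (caps >> c) & 1; }

int main(void)
{
	cap_set(CAP_THERMAL_AUTO_THROTTLE);
	cap_unset(CAP_AUTOMATIC_DC_TRANSITION);
	printf("thermal throttle: %d\n", cap_enabled(CAP_THERMAL_AUTO_THROTTLE));
	return 0;
}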
phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PauseMMSessions); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_GFXClockGatingManagedInCAIL); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_IcelandULPSSWWorkAround); - - - /* iceland doesn't support UVD and VCE */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (!result) { - if (sys_info.value & AMD_PG_SUPPORT_UVD) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - if (sys_info.value & AMD_PG_SUPPORT_VCE) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - - data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - ICELAND_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - } else { - /* Ignore return value in here, we are cleaning up a mess. */ - iceland_hwmgr_backend_fini(hwmgr); - } - - return 0; -failed: - return result; -} - -static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr) -{ - int result; - unsigned long ret = 0; - - result = pp_tables_get_num_of_entries(hwmgr, &ret); - - return result ? 
0 : ret; -} - -static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic); - -struct iceland_power_state *cast_phw_iceland_power_state( - struct pp_hw_power_state *hw_ps) -{ - if (hw_ps == NULL) - return NULL; - - PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (struct iceland_power_state *)hw_ps; -} - -static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *prequest_ps, - const struct pp_power_state *pcurrent_ps) -{ - struct iceland_power_state *iceland_ps = - cast_phw_iceland_power_state(&prequest_ps->hardware); - - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2, - "VI should always have 2 performance levels", - ); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < iceland_ps->performance_level_count; i++) { - if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk) - iceland_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk) - iceland_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; - iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { - - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) { - if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { - stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != 
hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = iceland_ps->performance_levels[0].engine_clock; - mclk = iceland_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock; - - iceland_ps->performance_levels[0].engine_clock = sclk; - iceland_ps->performance_levels[0].memory_clock = mclk; - - iceland_ps->performance_levels[1].engine_clock = - (iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ? - iceland_ps->performance_levels[1].engine_clock : - iceland_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < iceland_ps->performance_levels[1].memory_clock) - mclk = iceland_ps->performance_levels[1].memory_clock; - - iceland_ps->performance_levels[0].memory_clock = mclk; - iceland_ps->performance_levels[1].memory_clock = mclk; - } else { - if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock) - iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { - for (i=0; i < iceland_ps->performance_level_count; i++) { - iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - iceland_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - - return 0; -} - -static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - /* - * We return the status of Voltage Control instead of checking SCLK/MCLK DPM - * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, - * whereas voltage control is a fundemental change that will not be disabled - */ - return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0); -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. 
*/ - PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), - "Trying to force SCLK when DPM is disabled", return -1;); - if (0 == data->sclk_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_DPM_ForceState, - n) ? 0 : 1); - - return 0; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), - "Trying to Force MCLK when DPM is disabled", return -1;); - if (0 == data->mclk_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_ForceState, - n) ? 0 : 1); - - return 0; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), - "Trying to Force PCIE level when DPM is disabled", return -1;); - if (0 == data->pcie_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - n) ? 0 : 1); - - return 0; -} - -static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - uint32_t level, tmp; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - if (0 == data->sclk_dpm_key_disabled) { - /* SCLK */ - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), - "force highest sclk dpm state failed!", return -1); - PHM_WAIT_INDIRECT_FIELD(hwmgr->device, - SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level); - } - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* MCLK */ - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)), - "force highest mclk dpm state failed!", return -1); - PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level); - } - } - } - - if (0 == data->pcie_dpm_key_disabled) { - /* PCIE */ - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)), - "force highest pcie dpm state failed!", return -1); - } - } - } - - return 0; -} - -static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr, - uint32_t level_mask) -{ - uint32_t level = 0; - - while (0 == (level_mask & (1 << level))) - level++; - - return level; -} - -static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - uint32_t level; - iceland_hwmgr *data = 
(iceland_hwmgr *)(hwmgr->backend); - - /* for now force only sclk */ - if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = iceland_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - - PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), - "force sclk dpm state failed!", return -1); - - PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, - TARGET_AND_CURRENT_PROFILE_INDEX, - CURR_SCLK_INDEX, - level); - } - - return 0; -} - -int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr), - "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", - return -1); - - if (0 == data->sclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( - hwmgr->smumgr, - PPSMC_MSG_NoForcedLevel)), - "unforce sclk dpm state failed!", - return -1); - } - - if (0 == data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( - hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_NoForcedLevel)), - "unforce mclk dpm state failed!", - return -1); - } - - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( - hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel)), - "unforce pcie level failed!", - return -1); - } - - return 0; -} - -static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = iceland_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = iceland_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = iceland_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - return ret; -} - -const struct iceland_power_state *cast_const_phw_iceland_power_state( - const struct pp_hw_power_state *hw_ps) -{ - if (hw_ps == NULL) - return NULL; - - PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (const struct iceland_power_state *)hw_ps; -} - -static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; - struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < psclk_table->count; i++) { - if (sclk == psclk_table->dpm_levels[i].value) - break; - } - - if (i >= psclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* - * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep - * divider update is required. 
- */ - if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < pmclk_table->count; i++) { - if (mclk == pmclk_table->dpm_levels[i].value) - break; - } - - if (i >= pmclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps) -{ - uint32_t i; - uint32_t pcie_speed, max_speed = 0; - - for (i = 0; i < hw_ps->performance_level_count; i++) { - pcie_speed = hw_ps->performance_levels[i].pcie_gen; - if (max_speed < pcie_speed) - max_speed = pcie_speed; - } - - return max_speed; -} - -static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speed_cntl = 0; - - speed_cntl = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speed_cntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - - -static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state); - const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state); - - uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch(target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE( - 0 == iceland_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); 
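The freeze helper above only messages the SMC when need_update_smu7_dpm_table carries one of the DPMTABLE_* dirty bits, so re-applying an unchanged power state skips the firmware round trip entirely. A small sketch of that dirty-flag pattern, with simplified flag names standing in for the driver's DPMTABLE_OD_UPDATE_SCLK/MCLK bits:

#include <stdint.h>
#include <stdio.h>

/* Illustrative dirty bits, modeled on the driver's DPMTABLE_* flags. */
#define UPDATE_SCLK (1u << 0)
#define UPDATE_MCLK (1u << 1)

static uint32_t need_update;

/* Mark a table dirty when the requested clock no longer matches it. */
static void check_clock(uint32_t requested, uint32_t current_level, uint32_t flag)
{
	if (requested != current_level)
		need_update |= flag;
}

static void freeze_and_upload(void)
{
	if (!need_update)
		return; /* nothing changed: skip the expensive SMC round trip */

	if (need_update & UPDATE_SCLK)
		printf("freeze + re-upload SCLK levels\n");
	if (need_update & UPDATE_MCLK)
		printf("freeze + re-upload MCLK levels\n");

	need_update = 0; /* clean again until the next state change */
}

int main(void)
{
	check_clock(800, 800, UPDATE_SCLK);   /* unchanged: no work queued */
	check_clock(1500, 1250, UPDATE_MCLK); /* changed: mark MCLK dirty */
	freeze_and_upload();
	return 0;
}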
- PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; - uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; - struct iceland_dpm_table *pdpm_table = &data->dpm_table; - - struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { - pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* - * Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; - for (i = dpm_count; i > 1; i--) { - if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { - clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; - - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value + - (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; - - } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { - clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; - - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value - - (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; - } else - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; - for (i = dpm_count; i > 1; i--) { - if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { - clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; - - pdpm_table->mclk_table.dpm_levels[i].value = - pgolden_dpm_table->mclk_table.dpm_levels[i].value + - (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; - - } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; - - pdpm_table->mclk_table.dpm_levels[i].value = - pgolden_dpm_table->mclk_table.dpm_levels[i].value - - (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; - } else - pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - - if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = iceland_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = iceland_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct iceland_single_dpm_table *pdpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < pdpm_table->count; i++) { - if ((pdpm_table->dpm_levels[i].value < low_limit) || - (pdpm_table->dpm_levels[i].value > high_limit)) - pdpm_table->dpm_levels[i].enabled = false; - else - pdpm_table->dpm_levels[i].enabled = true; - } - return 0; -} - -static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state) -{ - int result = 0; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == hw_state->performance_level_count) ? 
0: 1; - - iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table), - hw_state->performance_levels[0].engine_clock, - hw_state->performance_levels[high_limit_count].engine_clock); - - iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table), - hw_state->performance_levels[0].memory_clock, - hw_state->performance_levels[high_limit_count].memory_clock); - - return result; -} - -static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) -{ - int result; - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); - - result = iceland_trim_dpm_states(hwmgr, iceland_ps); - if (0 != result) - return result; - - data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); - data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); - data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)) - data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - - data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); - - return 0; -} - -static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) -{ - return 0; -} - -static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end - ); - } - - return result; -} - -static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - uint32_t address; - int32_t result; - - if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) - return 0; - - - memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters)); - - result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); - - if(result != 0) - return result; - - - address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); - - return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, - (uint8_t *)&data->mc_reg_table.data[0], - sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, - data->sram_end); -} - -static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return 
iceland_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE( - 0 == iceland_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); - uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps); - uint8_t request; - - if (data->pspp_notify_required || - data->pcie_performance_request) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if(request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) { - data->pcie_performance_request = false; - return 0; - } - - if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { - if (PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - data->pcie_performance_request = false; - return 0; -} - -int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - PPSMC_Result result; - iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); - - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != iceland_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled \n"); - - if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Sclk Dpm enable Mask failed", return -1); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ - if (0 != iceland_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled \n"); - - if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Mclk Dpm enable Mask failed", return -1); - } - } - - return 0; -} - -static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) -{ - int tmp_result, result = 0; - - tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); - } - - tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); - - tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); - - tmp_result = iceland_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); - - tmp_result = iceland_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); - - tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); - - tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); - - tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result); - } - - return result; -} - -static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct iceland_power_state); -} - -static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct iceland_power_state *iceland_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - iceland_ps = cast_phw_iceland_power_state(&ps->hardware); - - if (low) - return iceland_ps->performance_levels[0].memory_clock; - else - return 
iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; -} - -static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct iceland_power_state *iceland_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - iceland_ps = cast_phw_iceland_power_state(&ps->hardware); - - if (low) - return iceland_ps->performance_levels[0].engine_clock; - else - return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; -} - -static int iceland_get_current_pcie_lane_number( - struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, - LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. */ - data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr); - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)iceland_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *power_state, - unsigned int index, const void *clock_info) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_power_state *iceland_power_state = cast_phw_iceland_power_state(power_state); - const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; - struct iceland_performance_level *performance_level; - uint32_t engine_clock, memory_clock; - uint16_t pcie_gen_from_bios; - - engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; - memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; - - if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) - 
data->highest_mclk = memory_clock; - - performance_level = &(iceland_power_state->performance_levels - [iceland_power_state->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (iceland_power_state->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - "Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged from low to high. */ - performance_level->memory_clock = memory_clock; - performance_level->engine_clock = engine_clock; - - pcie_gen_from_bios = visland_clk_info->ucPCIEGen; - - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); - - return 0; -} - -static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *state) -{ - int result; - struct iceland_power_state *ps; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct phm_clock_voltage_dependency_table *dep_mclk_table = - hwmgr->dyn_state.vddci_dependency_on_mclk; - - memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); - - state->hardware.magic = PHM_VIslands_Magic; - - ps = (struct iceland_power_state *)(&state->hardware); - - result = pp_tables_get_entry(hwmgr, entry_index, state, - iceland_get_pp_table_entry_callback_func); - - /* - * This is the earliest time we have all the dependency table - * and the VBIOS boot state as - * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot - * state if there is only one VDDCI/MCLK level, check if it's - * the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].v != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does not match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!state->validation.disallowOnDC) - ps->dc_compatible = true; - - if (state->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; - else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) { - if (data->bacos.best_match == 0xffff) { - /* For C.I. 
use boot state as base BACO state */ - data->bacos.best_match = PP_StateClassificationFlag_Boot; - data->bacos.performance_level = ps->performance_levels[0]; - } - } - - - ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK; - ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (state->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_performance.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static void -iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent; - uint32_t offset; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); - - offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); -} - -int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) -{ - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ - iceland_notify_smc_display_change(hwmgr, false); - else - iceland_notify_smc_display_change(hwmgr, true); - - return 0; -} - -/** -* Programs the display gap -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always OK -*/ -int iceland_program_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if(0 == refresh_rate) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us)); - - if (num_active_displays == 1) - iceland_notify_smc_display_change(hwmgr, true); - - return 0; -} - -int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - iceland_program_display_gap(hwmgr); - - return 0; -} - -/** -* Set maximum target operating fan output PWM -* -* @param pHwMgr: the address of the powerplay hardware manager. -* @param usMaxFanPwm: max operating fan PWM in percents -* @return The response that came from the SMC. -*/ -static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); -} - -/** -* Set maximum target operating fan output RPM -* -* @param pHwMgr: the address of the powerplay hardware manager. -* @param usMaxFanRpm: max operating fan RPM value. -* @return The response that came from the SMC. -*/ -static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 
0 : -1); -} - -static int iceland_dpm_set_interrupt_state(void *private_data, - unsigned src_id, unsigned type, - int enabled) -{ - uint32_t cg_thermal_int; - struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; - - if (hwmgr == NULL) - return -EINVAL; - - switch (type) { - case AMD_THERMAL_IRQ_LOW_TO_HIGH: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - - case AMD_THERMAL_IRQ_HIGH_TO_LOW: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - default: - break; - } - return 0; -} - -static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - int result; - const struct pp_interrupt_registration_info *info = - (const struct pp_interrupt_registration_info *)thermal_interrupt_info; - - if (info == NULL) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, - iceland_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, - iceland_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - return 0; -} - - -static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0,0,NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; -/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL - if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - cgs_get_min_clock_settings(hwmgr->device, &min_clocks); - if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) - is_update_required = true; -*/ - return is_update_required; -} - - -static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1, - const struct iceland_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, - const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1); - const struct iceland_power_state *psb = 
cast_const_phw_iceland_power_state(pstate2); - int i; - - if (equal == NULL || psa == NULL || psb == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); - *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - *equal &= (psa->acp_clk == psb->acp_clk); - - return 0; -} - -static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - iceland_fan_ctrl_stop_smc_fan_control(hwmgr); - iceland_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int iceland_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - case PP_PCIE: - { - uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - 
continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_PCIE: - pcie_speed = iceland_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? "*" : ""); - break; - default: - break; - } - return size; -} - -static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct iceland_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct iceland_power_state *iceland_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - iceland_ps = cast_phw_iceland_power_state(&ps->hardware); - - iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct iceland_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr) -{ - uint32_t reference_clock; - uint32_t tc; - uint32_t divide; - - ATOM_FIRMWARE_INFO *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); - - if (tc) - return TCLK; - - fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, - &size, &frev, &crev); - - if 
(!fw_info) - return 0; - - reference_clock = le16_to_cpu(fw_info->usReferenceClock); - - divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); - - if (0 != divide) - return reference_clock / 4; - - return reference_clock; -} - -static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct iceland_power_state *iceland_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - iceland_ps = cast_phw_iceland_power_state(&ps->hardware); - - iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} - -static const struct pp_hwmgr_func iceland_hwmgr_funcs = { - .backend_init = &iceland_hwmgr_backend_init, - .backend_fini = &iceland_hwmgr_backend_fini, - .asic_setup = &iceland_setup_asic_task, - .dynamic_state_management_enable = &iceland_enable_dpm_tasks, - .apply_state_adjust_rules = iceland_apply_state_adjust_rules, - .force_dpm_level = &iceland_force_dpm_level, - .power_state_set = iceland_set_power_state_tasks, - .get_power_state_size = iceland_get_power_state_size, - .get_mclk = iceland_dpm_get_mclk, - .get_sclk = iceland_dpm_get_sclk, - .patch_boot_state = iceland_dpm_patch_boot_state, - .get_pp_table_entry = iceland_get_pp_table_entry, - .get_num_of_pp_table_entries = iceland_get_num_of_entries, - .print_current_perforce_level = iceland_print_current_perforce_level, - .powerdown_uvd = iceland_phm_powerdown_uvd, - .powergate_uvd = iceland_phm_powergate_uvd, - .powergate_vce = iceland_phm_powergate_vce, - .disable_clock_power_gating = iceland_phm_disable_clock_power_gating, - .update_clock_gatings = iceland_phm_update_clock_gatings, - .notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = iceland_display_configuration_changed_task, - .set_max_fan_pwm_output = iceland_set_max_fan_pwm_output, - .set_max_fan_rpm_output = iceland_set_max_fan_rpm_output, - .get_temperature = iceland_thermal_get_temperature, - .stop_thermal_controller = iceland_thermal_stop_thermal_controller, - .get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt, - .check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration, - .check_states_equal = iceland_check_states_equal, - .set_fan_control_mode = iceland_set_fan_control_mode, - .get_fan_control_mode = iceland_get_fan_control_mode, - .force_clock_level = iceland_force_clock_level, - .print_clock_levels = iceland_print_clock_levels, - .get_sclk_od = iceland_get_sclk_od, - .set_sclk_od = iceland_set_sclk_od, - .get_mclk_od = 
iceland_get_mclk_od, - .set_mclk_od = iceland_set_mclk_od, -}; - -int iceland_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - iceland_hwmgr *data; - - data = kzalloc (sizeof(iceland_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - memset(data, 0x00, sizeof(iceland_hwmgr)); - - hwmgr->backend = data; - hwmgr->hwmgr_func = &iceland_hwmgr_funcs; - hwmgr->pptable_func = &pptable_funcs; - - /* thermal */ - pp_iceland_thermal_initialize(hwmgr); - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h deleted file mode 100644 index f253988de2d2..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Author: Huang Rui - * - */ -#ifndef ICELAND_HWMGR_H -#define ICELAND_HWMGR_H - -#include "hwmgr.h" -#include "ppatomctrl.h" -#include "ppinterrupt.h" -#include "ppsmc.h" -#include "iceland_powertune.h" -#include "pp_endian.h" -#include "smu71_discrete.h" - -#define ICELAND_MAX_HARDWARE_POWERLEVELS 2 -#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 - -struct iceland_performance_level { - uint32_t memory_clock; - uint32_t engine_clock; - uint16_t pcie_gen; - uint16_t pcie_lane; -}; - -struct _phw_iceland_bacos { - uint32_t best_match; - uint32_t baco_flags; - struct iceland_performance_level performance_level; -}; -typedef struct _phw_iceland_bacos phw_iceland_bacos; - -struct _phw_iceland_uvd_clocks { - uint32_t VCLK; - uint32_t DCLK; -}; - -typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks; - -struct _phw_iceland_vce_clocks { - uint32_t EVCLK; - uint32_t ECCLK; -}; - -typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks; - -struct iceland_power_state { - uint32_t magic; - phw_iceland_uvd_clocks uvd_clocks; - phw_iceland_vce_clocks vce_clocks; - uint32_t sam_clk; - uint32_t acp_clk; - uint16_t performance_level_count; - bool dc_compatible; - uint32_t sclk_threshold; - struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS]; -}; - -struct _phw_iceland_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; -typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level; - -#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define ICELAND_MINIMUM_ENGINE_CLOCK 5000 - -struct iceland_single_dpm_table { - uint32_t count; - phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct iceland_dpm_table { - struct iceland_single_dpm_table sclk_table; - struct iceland_single_dpm_table mclk_table; - struct iceland_single_dpm_table pcie_speed_table; - struct iceland_single_dpm_table vddc_table; - struct iceland_single_dpm_table vdd_gfx_table; - struct iceland_single_dpm_table vdd_ci_table; - struct iceland_single_dpm_table mvdd_table; -}; -typedef struct _phw_iceland_dpm_table phw_iceland_dpm_table; - - -struct _phw_iceland_clock_regisiters { - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t vDLL_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_SS1; - uint32_t vMPLL_SS2; -}; -typedef struct _phw_iceland_clock_regisiters phw_iceland_clock_registers; - -struct _phw_iceland_voltage_smio_registers { - uint32_t vs0_vid_lower_smio_cntl; -}; -typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers; - - -struct _phw_iceland_mc_reg_entry { - uint32_t mclk_max; - uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; -typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry; - -struct _phw_iceland_mc_reg_table { - uint8_t last; /* number of registers*/ - uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ - uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. 
bit0->address[0], bit1->address[1], etc.*/ - phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; -typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table; - -#define DISABLE_MC_LOADMICROCODE 1 -#define DISABLE_MC_CFGPROGRAMMING 2 - - -/*Ultra Low Voltage parameter structure */ -struct phw_iceland_ulv_parm{ - bool ulv_supported; - uint32_t ch_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct iceland_performance_level ulv_power_level; -}; - -#define ICELAND_MAX_LEAKAGE_COUNT 8 - -struct phw_iceland_leakage_voltage { - uint16_t count; - uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT]; -}; - -struct _phw_iceland_display_timing { - uint32_t min_clock_insr; - uint32_t num_existing_displays; -}; -typedef struct _phw_iceland_display_timing phw_iceland_display_timing; - - -struct phw_iceland_thermal_temperature_setting -{ - long temperature_low; - long temperature_high; - long temperature_shutdown; -}; - -struct _phw_iceland_dpmlevel_enable_mask { - uint32_t uvd_dpm_enable_mask; - uint32_t vce_dpm_enable_mask; - uint32_t acp_dpm_enable_mask; - uint32_t samu_dpm_enable_mask; - uint32_t sclk_dpm_enable_mask; - uint32_t mclk_dpm_enable_mask; - uint32_t pcie_dpm_enable_mask; -}; -typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask; - -struct _phw_iceland_pcie_perf_range { - uint16_t max; - uint16_t min; -}; -typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range; - -struct _phw_iceland_vbios_boot_state { - uint16_t mvdd_bootup_value; - uint16_t vddc_bootup_value; - uint16_t vddci_bootup_value; - uint16_t vddgfx_bootup_value; - uint32_t sclk_bootup_value; - uint32_t mclk_bootup_value; - uint16_t pcie_gen_bootup_value; - uint16_t pcie_lane_bootup_value; -}; -typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state; - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -/* We need to review which fields are needed. */ -/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
*/ -struct iceland_hwmgr { - struct iceland_dpm_table dpm_table; - struct iceland_dpm_table golden_dpm_table; - - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; - uint32_t static_screen_threshold_unit; - uint32_t static_screen_threshold; - uint32_t voltage_control; - uint32_t vdd_gfx_control; - - uint32_t vddc_vddci_delta; - uint32_t vddc_vddgfx_delta; - - struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; - struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; - struct pp_interrupt_registration_info smc_to_host_interrupt_info; - uint32_t active_auto_throttle_sources; - - struct pp_interrupt_registration_info external_throttle_interrupt; - irq_handler_func_t external_throttle_callback; - void *external_throttle_context; - - struct pp_interrupt_registration_info ctf_interrupt_info; - irq_handler_func_t ctf_callback; - void *ctf_context; - - phw_iceland_clock_registers clock_registers; - phw_iceland_voltage_smio_registers voltage_smio_registers; - - bool is_memory_GDDR5; - uint16_t acpi_vddc; - bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ - uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ - uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ - uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ - uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ - uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ - struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ - struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ - struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ - - uint32_t mvdd_control; - uint32_t vddc_mask_low; - uint32_t mvdd_mask_low; - uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ - uint16_t min_vddc_in_pp_table; - uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ - uint16_t min_vddci_in_pp_table; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edc_wr_enable_threshold; - bool is_uvd_enabled; - bool is_xdma_enabled; - phw_iceland_vbios_boot_state vbios_boot_state; - - bool battery_state; - bool is_tlu_enabled; - bool pcie_performance_request; - - /* -------------- SMC SRAM Address of firmware header tables ----------------*/ - uint32_t sram_end; /* The first address after the SMC SRAM. */ - uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ - uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ - uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ - uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ - uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ - uint32_t ulv_settings_start; - SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ - SMU71_Discrete_MCRegisters mc_reg_table; - SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ - - /* -------------- Stuff originally coming from Evergreen --------------------*/ - phw_iceland_mc_reg_table iceland_mc_reg_table; - uint32_t vdd_ci_control; - pp_atomctrl_voltage_table vddc_voltage_table; - pp_atomctrl_voltage_table vddci_voltage_table; - pp_atomctrl_voltage_table vddgfx_voltage_table; - pp_atomctrl_voltage_table mvdd_voltage_table; - - uint32_t mgcg_cgtt_local2; - uint32_t mgcg_cgtt_local3; - uint32_t gpio_debug; - uint32_t mc_micro_code_feature; - uint32_t highest_mclk; - uint16_t acpi_vdd_ci; - uint8_t mvdd_high_index; - uint8_t mvdd_low_index; - bool dll_defaule_on; - bool performance_request_registered; - - /* ----------------- Low Power Features ---------------------*/ - phw_iceland_bacos bacos; - struct phw_iceland_ulv_parm ulv; - - /* ----------------- CAC Stuff ---------------------*/ - uint32_t cac_table_start; - bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ - bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ - bool cac_enabled; - - /* ----------------- DPM2 Parameters ---------------------*/ - uint32_t power_containment_features; - bool enable_bapm_feature; - bool enable_dte_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool disable_uvd_power_tune_feature; - struct iceland_pt_defaults *power_tune_defaults; - SMU71_Discrete_PmFuses power_tune_table; - uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ - uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ - - /* ----------------- Phase Shedding ---------------------*/ - bool vddc_phase_shed_control; - - /* --------------------- DI/DT --------------------------*/ - phw_iceland_display_timing display_timing; - - /* --------- ReadRegistry data for memory and engine clock margins ---- */ - uint32_t engine_clock_data; - uint32_t memory_clock_data; - - /* -------- Thermal Temperature Setting --------------*/ - struct phw_iceland_thermal_temperature_setting thermal_temp_setting; - phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask; - - uint32_t need_update_smu7_dpm_table; - uint32_t sclk_dpm_key_disabled; - uint32_t mclk_dpm_key_disabled; - uint32_t pcie_dpm_key_disabled; - /* used to store the previous dal min sclock */ - uint32_t min_engine_clocks; - phw_iceland_pcie_perf_range pcie_gen_performance; - phw_iceland_pcie_perf_range pcie_lane_performance; - phw_iceland_pcie_perf_range pcie_gen_power_saving; - phw_iceland_pcie_perf_range pcie_lane_power_saving; - bool use_pcie_performance_levels; - bool use_pcie_power_saving_levels; - /* percentage value from 0-100, default 50 */ - uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS]; - uint32_t mclk_activity_target; - uint32_t low_sclk_interrupt_threshold; - uint32_t last_mclk_dpm_enable_mask; - bool uvd_enabled; - uint32_t pcc_monitor_enabled; - - /* --------- Power Gating States ------------*/ - bool uvd_power_gated; /* 1: gated, 0:not gated */ - bool vce_power_gated; /* 1: gated, 0:not gated */ - bool samu_power_gated; /* 1: gated, 0:not gated */ - bool acp_power_gated; /* 1: gated, 0:not gated */ - bool pg_acp_init; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; -}; - -typedef struct iceland_hwmgr iceland_hwmgr; - -int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); -int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr); -int 
iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr); -int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr); - -#define ICELAND_DPM2_NEAR_TDP_DEC 10 -#define ICELAND_DPM2_ABOVE_SAFE_INC 5 -#define ICELAND_DPM2_BELOW_SAFE_INC 20 - -/* - * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size - * is 128, then this value should be Log2(128) = 7. - */ -#define ICELAND_DPM2_LTA_WINDOW_SIZE 7 - -#define ICELAND_DPM2_LTS_TRUNCATE 0 - -#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100 - -#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF -#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF - -#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50 - -#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF -#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12 -#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 -#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E -#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF - -#define ICELAND_VOLTAGE_CONTROL_NONE 0x0 -#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2 - -/* convert to Q8.8 format for firmware */ -#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256 - -#define ICELAND_UNUSED_GPIO_PIN 0x7F - -#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c deleted file mode 100644 index 766280626836..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c +++ /dev/null @@ -1,490 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Author: Huang Rui - * - */ - -#include "amdgpu.h" -#include "hwmgr.h" -#include "smumgr.h" -#include "iceland_hwmgr.h" -#include "iceland_powertune.h" -#include "iceland_smumgr.h" -#include "smu71_discrete.h" -#include "smu71.h" -#include "pp_debug.h" -#include "cgs_common.h" -#include "pp_endian.h" - -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" - -#define VOLTAGE_SCALE 4 -#define POWERTUNE_DEFAULT_SET_MAX 1 - -#define DEVICE_ID_VI_ICELAND_M_6900 0x6900 -#define DEVICE_ID_VI_ICELAND_M_6901 0x6901 -#define DEVICE_ID_VI_ICELAND_M_6902 0x6902 -#define DEVICE_ID_VI_ICELAND_M_6903 0x6903 - - -struct iceland_pt_defaults defaults_iceland = -{ - /* - * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, - * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT - */ - 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, - { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, - { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } -}; - -/* 35W - XT, XTL */ -struct iceland_pt_defaults defaults_icelandxt = -{ - /* - * sviLoadLIneEn, SviLoadLineVddC, - * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, - * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, - * BAPM_TEMP_GRADIENT - */ - 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, - { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0}, - { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} -}; - -/* 25W - PRO, LE */ -struct iceland_pt_defaults defaults_icelandpro = -{ - /* - * sviLoadLIneEn, SviLoadLineVddC, - * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, - * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, - * BAPM_TEMP_GRADIENT - */ - 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, - { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0}, - { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} -}; - -void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t tmp = 0; - struct cgs_system_info sys_info = {0}; - uint32_t pdev_id; - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; - cgs_query_system_info(hwmgr->device, &sys_info); - pdev_id = (uint32_t)sys_info.value; - - switch (pdev_id) { - case DEVICE_ID_VI_ICELAND_M_6900: - case DEVICE_ID_VI_ICELAND_M_6903: - data->power_tune_defaults = &defaults_icelandxt; - break; - - case DEVICE_ID_VI_ICELAND_M_6901: - case DEVICE_ID_VI_ICELAND_M_6902: - data->power_tune_defaults = &defaults_icelandpro; - break; - default: - /* TODO: need to assign valid defaults */ - data->power_tune_defaults = &defaults_iceland; - pr_warning("Unknown V.I. 
Device ID.\n"); - break; - } - - /* Assume disabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - data->ul_dte_tj_offset = tmp; - - if (!tmp) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - - data->fast_watermark_threshold = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - tmp = 1; - data->enable_dte_feature = tmp ? false : true; - data->enable_tdc_limit_feature = tmp ? true : false; - data->enable_pkg_pwr_tracking_feature = tmp ? true : false; - } - } -} - -int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - struct iceland_pt_defaults *defaults = data->power_tune_defaults; - SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table); - struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; - struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; - uint16_t *def1, *def2; - int i, j, k; - - /* - * TDP number of fraction bits are changed from 8 to 7 for Iceland - * as requested by SMC team - */ - dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); - dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); - - dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset; - - dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); - dpm_table->GpuTjHyst = 8; - - dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; - - /* The following are for new Iceland Multi-input fan/thermal control */ - if(NULL != ppm) { - dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; - dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; - } else { - dpm_table->PPM_PkgPwrLimit = 0; - dpm_table->PPM_TemperatureLimit = 0; - } - - CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); - CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); - - dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); - def1 = defaults->bapmti_r; - def2 = defaults->bapmti_rc; - - for (i = 0; i < SMU71_DTE_ITERATIONS; i++) { - for (j = 0; j < SMU71_DTE_SOURCES; j++) { - for (k = 0; k < SMU71_DTE_SINKS; k++) { - dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); - dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); - def1++; - def2++; - } - } - } - - return 0; -} - -static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - const struct iceland_pt_defaults *defaults = data->power_tune_defaults; - - data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; - data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; - data->power_tune_table.SviLoadLineTrimVddC = 3; - data->power_tune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} - -static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) -{ - uint16_t tdc_limit; - 
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - const struct iceland_pt_defaults *defaults = data->power_tune_defaults; - - /* TDC number of fraction bits are changed from 8 to 7 - * for Iceland as requested by SMC team - */ - tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); - data->power_tune_table.TDC_VDDC_PkgLimit = - CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); - data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - defaults->tdc_vddc_throttle_release_limit_perc; - data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; - - return 0; -} - -static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - const struct iceland_pt_defaults *defaults = data->power_tune_defaults; - uint32_t temp; - - if (smu7_read_smc_sram_dword(hwmgr->smumgr, - fuse_table_offset + - offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), - (uint32_t *)&temp, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", - return -EINVAL); - else - data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; - - return 0; -} - -static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) -{ - int i; - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 8; i++) - data->power_tune_table.GnbLPML[i] = 0; - - return 0; -} - -static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; - struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; - - HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); - LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); - - data->power_tune_table.BapmVddCBaseLeakageHiSidd = - CONVERT_FROM_HOST_TO_SMC_US(HiSidd); - data->power_tune_table.BapmVddCBaseLeakageLoSidd = - CONVERT_FROM_HOST_TO_SMC_US(LoSidd); - - return 0; -} - -int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - uint32_t pm_fuse_table_offset; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, - SMU71_FIRMWARE_HEADER_LOCATION + - offsetof(SMU71_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to get pm_fuse_table_offset Failed!", - return -EINVAL); - - /* DW0 - DW3 */ - if (iceland_populate_bapm_vddc_vid_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate bapm vddc vid Failed!", - return -EINVAL); - - /* DW4 - DW5 */ - if (iceland_populate_vddc_vid(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate vddc vid Failed!", - return -EINVAL); - - /* DW6 */ - if (iceland_populate_svi_load_line(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate SviLoadLine Failed!", - return -EINVAL); - /* DW7 */ - if (iceland_populate_tdc_limit(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TDCLimit Failed!", return -EINVAL); - /* DW8 */ - 
if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TdcWaterfallCtl, " - "LPMLTemperature Min and Max Failed!", - return -EINVAL); - - /* DW9-DW12 */ - if (0 != iceland_populate_temperature_scaler(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate LPMLTemperatureScaler Failed!", - return -EINVAL); - - /* DW13-DW16 */ - if (iceland_populate_gnb_lpml(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Failed!", - return -EINVAL); - - /* DW17 */ - if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - - /* DW18 */ - if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", - return -EINVAL); - - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, - (uint8_t *)&data->power_tune_table, - sizeof(struct SMU71_Discrete_PmFuses), data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to download PmFuseTable Failed!", - return -EINVAL); - } - return 0; -} - -int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { - int smc_result; - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableCac)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable CAC in SMC.", result = -1); - - data->cac_enabled = (0 == smc_result) ? true : false; - } - return result; -} - -static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - - if(data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PkgPwrSetLimit, n); - return 0; -} - -static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) -{ - return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); -} - -int iceland_enable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table; - int smc_result; - int result = 0; - uint32_t is_asic_kicker; - - data->power_containment_features = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2); - is_asic_kicker = (is_asic_kicker >> 12) & 0x01; - - if (data->enable_bapm_feature && - (!is_asic_kicker || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableDTE)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable BAPM in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; - } - - if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc)) - dpm_table->DTEMode = 2; - - if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); - PP_ASSERT_WITH_CODE((0 == 
smc_result), - "Failed to enable TDCLimit in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_TDCLimit; - } - - if (data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable PkgPwrTracking in SMC.", result = -1;); - if (0 == smc_result) { - struct phm_cac_tdp_table *cac_table = - hwmgr->dyn_state.cac_dtp_table; - uint32_t default_limit = - (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_PkgPwrLimit; - - if (iceland_set_power_limit(hwmgr, default_limit)) - printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); - } - } - } - return result; -} - -int iceland_power_control_set_level(struct pp_hwmgr *hwmgr) -{ - struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; - int adjust_percent, target_tdp; - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - /* adjustment percentage has already been validated */ - adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? - hwmgr->platform_descriptor.TDPAdjustment : - (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* - * SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; - result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); - } - - return result; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h deleted file mode 100644 index 4008d49617e4..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Author: Huang Rui - * - */ -#ifndef ICELAND_POWERTUNE_H -#define ICELAND_POWERTUNE_H - -#include "smu71.h" - -enum iceland_pt_config_reg_type { - ICELAND_CONFIGREG_MMR = 0, - ICELAND_CONFIGREG_SMC_IND, - ICELAND_CONFIGREG_DIDT_IND, - ICELAND_CONFIGREG_CACHE, - ICELAND_CONFIGREG_MAX -}; - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 -#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 - -struct iceland_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum iceland_pt_config_reg_type type; -}; - -void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); -int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); -int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr); -int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr); -int iceland_enable_power_containment(struct pp_hwmgr *hwmgr); -int iceland_power_control_set_level(struct pp_hwmgr *hwmgr); - -#endif /* ICELAND_POWERTUNE_H */ - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c deleted file mode 100644 index 45d17d715640..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c +++ /dev/null @@ -1,595 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Author: Huang Rui - * - */ -#include -#include "iceland_thermal.h" -#include "iceland_hwmgr.h" -#include "iceland_smumgr.h" -#include "atombios.h" -#include "ppsmc.h" - -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" - -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" - -#include "smu/smu_7_1_1_d.h" -#include "smu/smu_7_1_1_sh_mask.h" - - -/** -* Get Fan Speed Control Parameters. -* @param hwmgr the address of the powerplay hardware manager. -* @param pSpeed is the address of the structure where the result is to be placed. -* @exception Always succeeds except if we cannot zero out the output structure. 
-*/ -int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, - struct phm_fan_speed_info *fan_speed_info) -{ - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - fan_speed_info->supports_percent_read = true; - fan_speed_info->supports_percent_write = true; - fan_speed_info->min_percent = 0; - fan_speed_info->max_percent = 100; - - if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - fan_speed_info->supports_rpm_read = true; - fan_speed_info->supports_rpm_write = true; - fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; - fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; - } else { - fan_speed_info->min_rpm = 0; - fan_speed_info->max_rpm = 0; - } - - return 0; -} - -/** -* Get Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param pSpeed is the address of the structure where the result is to be placed. -* @exception Fails is the 100% setting appears to be 0. -*/ -int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); - - if (0 == duty100) - return -EINVAL; - - - tmp64 = (uint64_t)duty * 100; - do_div(tmp64, duty100); - *speed = (uint32_t)tmp64; - - if (*speed > 100) - *speed = 100; - - return 0; -} - -/** -* Get Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the address of the structure where the result is to be placed. -* @exception Returns not supported if no fan is found or if pulses per revolution are not set -*/ -int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - return 0; -} - -/** -* Set Fan Speed Control to static mode, so that the user can decide what speed to use. -* @param hwmgr the address of the powerplay hardware manager. -* mode the fan control mode, 0 default, 1 by percent, 5, by RPM -* @exception Should always succeed. -*/ -int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - - if (hwmgr->fan_ctrl_is_in_default_mode) { - hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); - hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); - hwmgr->fan_ctrl_is_in_default_mode = false; - } - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); - - return 0; -} - -/** -* Reset Fan Speed Control to default mode. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Should always succeed. 
-*/ -static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->fan_ctrl_is_in_default_mode) { - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); - hwmgr->fan_ctrl_is_in_default_mode = true; - } - - return 0; -} - -int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; -} - - -int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; -} - -/** -* Set Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (0% - 100%) to be set. -* @exception Fails is the 100% setting appears to be 0. -*/ -int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return -EINVAL; - - if (speed > 100) { - pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n"); - speed = 100; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - iceland_fan_ctrl_stop_smc_fan_control(hwmgr); - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - - if (0 == duty100) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - - return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reset Fan Speed to default. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Always succeeds. -*/ -int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) -{ - int result; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - if (0 == result) - result = iceland_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = iceland_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (min - max) to be set. -* @exception Fails is the speed not lie between min and max. -*/ -int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - return 0; -} - -/** -* Reads the remote temperature from the SIslands thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr) -{ - int temp; - - temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); - - /* - * Bit 9 means the reading is lower than the lowest usable - * value. - */ - if (0 != (0x200 & temp)) - temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING; - else - temp = (temp & 0x1ff); - - temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return temp; -} - -/** -* Set the requested temperature range for high and low alert signals -* -* @param hwmgr The address of the hardware manager. 
-* @param range Temperature range to be programmed for high and low alert signals -* @exception PP_Result_BadInput if the input data is not valid. -*/ -static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) -{ - uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - if (low < low_temp) - low = low_temp; - if (high > high_temp) - high = high_temp; - - if (low > high) - return -EINVAL; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - - return 0; -} - -/** -* Programs thermal controller one-time setting registers -* -* @param hwmgr The address of the hardware manager. -*/ -static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_CTRL, EDGE_PER_REV, - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); - - return 0; -} - -/** -* Enable thermal alerts on the RV770 thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to enable internal thermal interrupts */ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; -} - -/** -* Disable thermal alerts on the RV770 thermal controller. -* @param hwmgr The address of the hardware manager. -*/ -static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to disable internal thermal interrupts */ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; -} - -/** -* Uninitialize the thermal controller. -* Currently just disables alerts. -* @param hwmgr The address of the hardware manager. -*/ -int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) -{ - int result = iceland_thermal_disable_alert(hwmgr); - - if (result) - pr_warning("Failed to disable thermal alerts!\n"); - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - iceland_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); - SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - return 0; - - if (0 == data->fan_table_start) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - - if (0 == duty100) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = iceland_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - - //fan_table.FanControl_GL_Flag = 1; - - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); -/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
- if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ - hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); - - if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ - hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); - - if (0 != res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); -*/ - return 0; -} - -/** -* Start the fan control on the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ -/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. - * Make sure that we still think controlling the fan is OK. -*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - iceland_fan_ctrl_start_smc_fan_control(hwmgr); - iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - } - - return 0; -} - -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; - - if (range == NULL) - return -EINVAL; - - return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) -{ - return iceland_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return iceland_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return iceland_thermal_disable_alert(hwmgr); -} - -static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = { - { NULL, tf_iceland_thermal_initialize }, - { NULL, tf_iceland_thermal_set_temperature_range }, - { NULL, tf_iceland_thermal_enable_alert }, - /* - * We should restrict performance levels to low before we halt - * the SMC. On the other hand we are still in boot state when - * we do this so it would be pointless. If this assumption - * changes we have to revisit this table. - */ - { NULL, tf_iceland_thermal_setup_fan_table}, - { NULL, tf_iceland_thermal_start_smc_fan_control}, - { NULL, NULL } -}; - -static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - iceland_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = { - { NULL, tf_iceland_thermal_disable_alert}, - { NULL, tf_iceland_thermal_set_temperature_range}, - { NULL, tf_iceland_thermal_enable_alert}, - { NULL, NULL } -}; - -static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - iceland_thermal_set_temperature_range_master_list -}; - -int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - iceland_fan_ctrl_set_default_mode(hwmgr); - return 0; -} - -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); - - if (0 == result) { - result = phm_construct_table(hwmgr, - &iceland_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (0 != result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (0 == result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h deleted file mode 100644 index 267945f4df71..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Author: Huang Rui - * - */ - -#ifndef ICELAND_THERMAL_H -#define ICELAND_THERMAL_H - -#include "hwmgr.h" - -#define ICELAND_THERMAL_HIGH_ALERT_MASK 0x1 -#define ICELAND_THERMAL_LOW_ALERT_MASK 0x2 - -#define ICELAND_THERMAL_MINIMUM_TEMP_READING -256 -#define ICELAND_THERMAL_MAXIMUM_TEMP_READING 255 - -#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP 0 -#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c deleted file mode 100644 index 7e405b04c2c5..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c +++ /dev/null @@ -1,444 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "polaris10_clockpowergating.h" - -int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_uvd_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_UVDPowerOFF); - return 0; -} - -static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_uvd_power_gating(hwmgr)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDynamicPowerGating)) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDPowerON, 1); - } else { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDPowerON, 0); - } - } - - return 0; -} - -static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_VCEPowerOFF); - return 0; -} - -static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_VCEPowerON); - return 0; -} - -static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SAMPowerOFF); - return 0; -} - -static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SAMPowerON); - return 0; -} - -int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - - polaris10_phm_powerup_uvd(hwmgr); - polaris10_phm_powerup_vce(hwmgr); - polaris10_phm_powerup_samu(hwmgr); - - return 0; -} - -int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->uvd_power_gated == bgate) - return 0; - - data->uvd_power_gated = bgate; - - if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - polaris10_update_uvd_dpm(hwmgr, true); - polaris10_phm_powerdown_uvd(hwmgr); - } else { - polaris10_phm_powerup_uvd(hwmgr); - polaris10_update_uvd_dpm(hwmgr, false); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - } - - return 0; -} - -int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->vce_power_gated == bgate) - return 0; - - data->vce_power_gated = bgate; - - if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - polaris10_update_vce_dpm(hwmgr, true); - polaris10_phm_powerdown_vce(hwmgr); - } else { - polaris10_phm_powerup_vce(hwmgr); - polaris10_update_vce_dpm(hwmgr, false); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - } - return 0; -} - -int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->samu_power_gated == bgate) - return 0; - - data->samu_power_gated = bgate; - - if (bgate) { - polaris10_update_samu_dpm(hwmgr, true); - polaris10_phm_powerdown_samu(hwmgr); - } 
else { - polaris10_phm_powerup_samu(hwmgr); - polaris10_update_samu_dpm(hwmgr, false); - } - - return 0; -} - -int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, - const uint32_t *msg_id) -{ - PPSMC_Msg msg; - uint32_t value; - - switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { - case PP_GROUP_GFX: - switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { - case PP_BLOCK_GFX_CG: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CGCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CGLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_GFX_3D: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_3DCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_3DLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_GFX_RLC: - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_RLC_LS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_GFX_CP: - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CP_LS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_GFX_MG: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK | - CG_GFX_OTHERS_MGCG_MASK); - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - default: - return -1; - } - break; - - case PP_GROUP_SYS: - switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { - case PP_BLOCK_SYS_BIF: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_BIF_MGCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_BIF_MGLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_MC: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? 
- PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_MC_MGCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_MC_MGLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_DRM: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_DRM_MGCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_DRM_MGLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_HDP: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_HDP_MGCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_HDP_MGLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_SDMA: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_SDMA_MGCG_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_SDMA_MGLS_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_ROM: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ? - PPSMC_MSG_EnableClockGatingFeature : - PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_ROM_MASK; - - if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) - return -1; - } - break; - - default: - return -1; - - } - break; - - default: - return -1; - - } - - return 0; -} - -/* This function is for Polaris11 only for now, - * Powerplay will only control the static per CU Power Gating. - * Dynamic per CU Power Gating will be done in gfx. 
- */ -int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) -{ - struct cgs_system_info sys_info = {0}; - uint32_t active_cus; - int result; - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO; - - result = cgs_query_system_info(hwmgr->device, &sys_info); - - if (result) - return -EINVAL; - else - active_cus = sys_info.value; - - if (enable) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus); - else - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_GFX_CU_PG_DISABLE); -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h deleted file mode 100644 index 88d68cb6e89d..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _POLARIS10_CLOCK_POWER_GATING_H_ -#define _POLARIS10_CLOCK_POWER_GATING_H_ - -#include "polaris10_hwmgr.h" -#include "pp_asicblocks.h" - -int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); -int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, - const uint32_t *msg_id); -int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable); - -#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h deleted file mode 100644 index f78ffd935cee..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef POLARIS10_DYN_DEFAULTS_H -#define POLARIS10_DYN_DEFAULTS_H - - -enum Polaris10dpm_TrendDetection { - Polaris10Adpm_TrendDetection_AUTO, - Polaris10Adpm_TrendDetection_UP, - Polaris10Adpm_TrendDetection_DOWN -}; -typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection; - -/* We need to fill in the default values */ - - -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 - - -#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200 -#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0 -#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8 -#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 -#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4 - -#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035 -#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450 -#define PPPOLARIS10_TARGETACTIVITY_DFLT 50 -#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10 - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c deleted file mode 100644 index 970e3930452d..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ /dev/null @@ -1,5290 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include -#include -#include -#include -#include "linux/delay.h" -#include "pp_acpi.h" -#include "hwmgr.h" -#include "polaris10_hwmgr.h" -#include "polaris10_powertune.h" -#include "polaris10_dyn_defaults.h" -#include "polaris10_smumgr.h" -#include "pp_debug.h" -#include "ppatomctrl.h" -#include "atombios.h" -#include "pptable_v1_0.h" -#include "pppcielanes.h" -#include "amd_pcie_helpers.h" -#include "hardwaremanager.h" -#include "process_pptables_v1_0.h" -#include "cgs_common.h" -#include "smu74.h" -#include "smu_ucode_xfer_vi.h" -#include "smu74_discrete.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" -#include "oss/oss_3_0_d.h" -#include "gca/gfx_8_0_d.h" -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" - -#include "polaris10_thermal.h" -#include "polaris10_clockpowergating.h" - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define MC_CG_SEQ_DRAMCONF_S0 0x05 -#define MC_CG_SEQ_DRAMCONF_S1 0x06 -#define MC_CG_SEQ_YCLK_SUSPEND 0x04 -#define MC_CG_SEQ_YCLK_RESUME 0x0a - - -#define SMC_RAM_END 0x40000 - -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -#define VDDC_VDDCI_DELTA 200 - -#define MEM_FREQ_LOW_LATENCY 25000 -#define MEM_FREQ_HIGH_LATENCY 80000 - -#define MEM_LATENCY_HIGH 45 -#define MEM_LATENCY_LOW 35 -#define MEM_LATENCY_ERR 0xFFFF - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - - -#define PCIE_BUS_CLK 10000 -#define TCLK (PCIE_BUS_CLK / 10) - -/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, - DPM_EVENT_SRC_EXTERNAL = 1, - DPM_EVENT_SRC_DIGITAL = 2, - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 -}; - -static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic); - -static struct polaris10_power_state *cast_phw_polaris10_power_state( - struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (struct polaris10_power_state *)hw_ps; -} - -static const struct polaris10_power_state * -cast_const_phw_polaris10_power_state( - const struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (const struct polaris10_power_state *)hw_ps; -} - -static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? 
true : false; -} - -/** - * Find the MC microcode version and store it in the HwMgr struct - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr) -{ - cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - - hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); - - return 0; -} - -static uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speedCntl = 0; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speedCntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -/** -* Enable voltage control -* -* @param pHwMgr the address of the powerplay hardware manager. -* @return always PP_Result_OK -*/ -static int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) -{ - PP_ASSERT_WITH_CODE( - (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0), - "Failed to enable voltage DPM during DPM Start Function!", - return 1; - ); - - return 0; -} - -/** -* Checks if we want to support voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -*/ -static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr) -{ - const struct polaris10_hwmgr *data = - (const struct polaris10_hwmgr *)(hwmgr->backend); - - return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control); -} - -/** -* Enable voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -/** -* Create Voltage Tables. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - int result; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, - &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve MVDD table.", - return result); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table), - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 MVDD table from dependancy table.", - return result;); - } - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, - &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", - return result); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table), - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", - return result); - } - - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table), - table_info->vddc_lookup_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from lookup table.", - return result); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC, - &(data->vddc_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI, - &(data->vddci_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD. Trimming to fit state table.", - phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD, - &(data->mvdd_voltage_table))); - - return 0; -} - -/** -* Programs static screed detection parameters -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_program_static_screen_threshold_parameters( - struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** -* Setup display gap for glitch free memory clock switching. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t display_gap = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL); - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, - DISP_GAP, DISPLAY_GAP_IGNORE); - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, - DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - return 0; -} - -/** -* Programs activity state transition voting clients -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - -static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr) -{ - /* Reset voting clients before disabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, 0); - - return 0; -} - -/** -* Get the location of various tables inside the FW image. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); - uint32_t tmp; - int result; - bool error = false; - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) - data->dpm_table_start = tmp; - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (!result) { - data->soft_regs_start = tmp; - smu_data->soft_regs_start = tmp; - } - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (!result) - data->mc_reg_table_start = tmp; - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (!result) - data->fan_table_start = tmp; - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (!result) - data->arb_table_start = tmp; - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, Version), - &tmp, data->sram_end); - - if (!result) - hwmgr->microcode_version_info.SMC = tmp; - - error |= (0 != result); - - return error ? -1 : 0; -} - -/* Copy one arb setting to another and then switch the active set. - * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
- */ -static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arb_src, uint32_t arb_dest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arb_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - default: - return -EINVAL; - } - - switch (arb_dest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - default: - return -EINVAL; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); - - return 0; -} - -static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); -} - -/** -* Initial switch from ARB F0->F1 -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -* This function is to be called from the SetPowerState table. 
-*/ -static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) -{ - return polaris10_copy_and_switch_arb_sets(hwmgr, - MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) -{ - uint32_t tmp; - - tmp = (cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixSMC_SCRATCH9) & - 0x0000ff00) >> 8; - - if (tmp == MC_CG_ARB_FREQ_F0) - return 0; - - return polaris10_copy_and_switch_arb_sets(hwmgr, - tmp, MC_CG_ARB_FREQ_F0); -} - -static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint32_t i, max_entry; - - PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || - data->use_pcie_power_saving_levels), "No pcie performance levels!", - return -EINVAL); - - if (data->use_pcie_performance_levels && - !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && - data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, - SMU74_MAX_LEVELS_LINK, - MAX_REGULAR_DPM_NUMBER); - - if (pcie_table != NULL) { - /* max_entry is used to make sure we reserve one PCIE level - * for boot level (fix for A+A PSPP issue). - * If PCIE table from PPTable have ULV entry + 8 entries, - * then ignore the last entry.*/ - max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ? 
- SMU74_MAX_LEVELS_LINK : pcie_table->count; - for (i = 1; i < max_entry; i++) { - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, - get_pcie_gen_support(data->pcie_gen_cap, - pcie_table->entries[i].gen_speed), - get_pcie_lane_support(data->pcie_lane_cap, - pcie_table->entries[i].lane_width)); - } - data->dpm_table.pcie_speed_table.count = max_entry - 1; - - /* Setup BIF_SCLK levels */ - for (i = 0; i < max_entry; i++) - data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; - } else { - /* Hardcode Pcie Table */ - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - data->dpm_table.pcie_speed_table.count = 6; - } - /* Populate last level for boot PCIE level, but do not increment count. */ - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, - data->dpm_table.pcie_speed_table.count, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - return 0; -} - -/* - * This function is to initalize all DPM state tables - * for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these - * state tables to the allowed range based - * on the power policy or external client requests, - * such as UVD request, etc. - */ -static int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, - "SCLK dependency table has to have is missing." - "This table is mandatory", - return -EINVAL); - - PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, - "MCLK dependency table has to have is missing." 
- "This table is mandatory", - return -EINVAL); - - /* clear the state table to reset everything to default */ - phm_reset_single_dpm_table( - &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER); - phm_reset_single_dpm_table( - &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER); - - - /* Initialize Sclk DPM table based on allow Sclk values */ - data->dpm_table.sclk_table.count = 0; - for (i = 0; i < dep_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value != - dep_sclk_table->entries[i].clk) { - - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = - dep_sclk_table->entries[i].clk; - - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.sclk_table.count++; - } - } - - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i = 0; i < dep_mclk_table->count; i++) { - if (i == 0 || data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count - 1].value != - dep_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = - dep_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.mclk_table.count++; - } - } - - /* setup PCIE gen speed levels */ - polaris10_setup_default_pcie_table(hwmgr); - - /* save a copy of the default DPM table */ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), - sizeof(struct polaris10_dpm_table)); - - return 0; -} - -/** - * Mvdd table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. - * @return 0 - */ -static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t count, level; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - count = data->mvdd_voltage_table.count; - if (count > SMU_MAX_SMIO_LEVELS) - count = SMU_MAX_SMIO_LEVELS; - for (level = 0; level < count; level++) { - table->SmioTable2.Pattern[level].Voltage = - PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); - /* Index into DpmTable.Smio. 
Drive bits from Smio entry to get this voltage level.*/ - table->SmioTable2.Pattern[level].Smio = - (uint8_t) level; - table->Smio[level] |= - data->mvdd_voltage_table.entries[level].smio_low; - } - table->SmioMask2 = data->mvdd_voltage_table.mask_low; - - table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); - } - - return 0; -} - -static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - uint32_t count, level; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - count = data->vddci_voltage_table.count; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - if (count > SMU_MAX_SMIO_LEVELS) - count = SMU_MAX_SMIO_LEVELS; - for (level = 0; level < count; ++level) { - table->SmioTable1.Pattern[level].Voltage = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); - table->SmioTable1.Pattern[level].Smio = (uint8_t) level; - - table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; - } - } - - table->SmioMask1 = data->vddci_voltage_table.mask_low; - - return 0; -} - -/** -* Preparation of vddc and vddgfx CAC tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - uint32_t count; - uint8_t index; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_voltage_lookup_table *lookup_table = - table_info->vddc_lookup_table; - /* tables is already swapped, so in order to use the value from it, - * we need to swap it back. - * We are populating vddc CAC data to BapmVddc table - * in split and merged mode - */ - for (count = 0; count < lookup_table->count; count++) { - index = phm_get_voltage_index(lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low); - table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid); - table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high); - } - - return 0; -} - -/** -* Preparation of voltage tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ - -static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - polaris10_populate_smc_vddci_table(hwmgr, table); - polaris10_populate_smc_mvdd_table(hwmgr, table); - polaris10_populate_cac_table(hwmgr, table); - - return 0; -} - -static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_Ulv *state) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - state->CcPwrDynRm = 0; - state->CcPwrDynRm1 = 0; - - state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; - state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * - VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - - state->VddcPhase = (data->vddc_phase_shed_control) ? 
0 : 1; - - CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); - CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); - - return 0; -} - -static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - return polaris10_populate_ulv_level(hwmgr, &table->Ulv); -} - -static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - int i; - - /* Index (dpm_table->pcie_speed_table.count) - * is reserved for PCIE boot level. */ - for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( - dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = 1; - table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); - table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); - table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); - } - - data->smc_state_table.LinkLevelCount = - (uint8_t)dpm_table->pcie_speed_table.count; - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); - - return 0; -} - -static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr) -{ - uint32_t reference_clock, tmp; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); - - if (tmp) - return TCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - reference_clock = mode_info.ref_clock; - - tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); - - if (0 != tmp) - return reference_clock / 4; - - return reference_clock; -} - -/** -* Calculates the SCLK dividers using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ -static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, - uint32_t clock, SMU_SclkSetting *sclk_setting) -{ - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - struct pp_atomctrl_clock_dividers_ai dividers; - - uint32_t ref_clock; - uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq; - uint8_t i; - int result; - uint64_t temp; - - sclk_setting->SclkFrequency = clock; - /* get the engine clock dividers for this clock value */ - result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers); - if (result == 0) { - sclk_setting->Fcw_int = dividers.usSclk_fcw_int; - sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac; - sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int; - sclk_setting->PllRange = dividers.ucSclkPllRange; - sclk_setting->Sclk_slew_rate = 0x400; - sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac; - sclk_setting->Pcc_down_slew_rate = 0xffff; - sclk_setting->SSc_En = dividers.ucSscEnable; - sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int; - sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac; - sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac; -
return result; - } - - ref_clock = polaris10_get_xclk(hwmgr); - - for (i = 0; i < NUM_SCLK_RANGE; i++) { - if (clock > data->range_table[i].trans_lower_frequency - && clock <= data->range_table[i].trans_upper_frequency) { - sclk_setting->PllRange = i; - break; - } - } - - sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); - temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; - temp <<= 0x10; - do_div(temp, ref_clock); - sclk_setting->Fcw_frac = temp & 0xffff; - - pcc_target_percent = 10; /* Hardcode 10% for now. */ - pcc_target_freq = clock - (clock * pcc_target_percent / 100); - sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); - - ss_target_percent = 2; /* Hardcode 2% for now. */ - sclk_setting->SSc_En = 0; - if (ss_target_percent) { - sclk_setting->SSc_En = 1; - ss_target_freq = clock - (clock * ss_target_percent / 100); - sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); - temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; - temp <<= 0x10; - do_div(temp, ref_clock); - sclk_setting->Fcw1_frac = temp & 0xffff; - } - - return 0; -} - -static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, - uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) -{ - uint32_t i; - uint16_t vddci; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - *voltage = *mvdd = 0; - - /* clock - voltage dependency table is empty table */ - if (dep_table->count == 0) - return -EINVAL; - - for (i = 0; i < dep_table->count; i++) { - /* find first sclk bigger than request */ - if (dep_table->entries[i].clk >= clock) { - *voltage |= (dep_table->entries[i].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i].vddci) - *voltage |= (dep_table->entries[i].vddci * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else { - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i].mvdd * - VOLTAGE_SCALE; - - *voltage |= 1 << PHASES_SHIFT; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i-1].vddci) { - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; - - return 0; -} - -static const 
sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = -{ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, - {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160}, - {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112}, - {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160}, - {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112}, - {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160}, - {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108}, - {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} }; - -static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr) -{ - uint32_t i, ref_clk; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; - - ref_clk = polaris10_get_xclk(hwmgr); - - if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) { - for (i = 0; i < NUM_SCLK_RANGE; i++) { - table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting; - table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv; - table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc; - - table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper; - table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower; - - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); - } - return; - } - - for (i = 0; i < NUM_SCLK_RANGE; i++) { - - data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv; - data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv; - - table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; - table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; - table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; - - table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; - table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; - - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); - } -} - -/** -* Populates single SMC SCLK structure using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ - -static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, - uint32_t clock, uint16_t sclk_al_threshold, - struct SMU74_Discrete_GraphicsLevel *level) -{ - int result, i, temp; - /* PP_Clocks minClocks; */ - uint32_t mvdd; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - SMU_SclkSetting curr_sclk_setting = { 0 }; - - result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); - - /* populate graphics levels */ - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, clock, - &level->MinVoltage, &mvdd); - - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for " - "VDDC engine clock dependency table", - return result); 
- level->ActivityLevel = sclk_al_threshold; - - level->CcPwrDynRm = 0; - level->CcPwrDynRm1 = 0; - level->EnabledForActivity = 0; - level->EnabledForThrottle = 1; - level->UpHyst = 10; - level->DownHyst = 0; - level->VoltageDownHyst = 0; - level->PowerThrottle = 0; - - /* - * TODO: get minimum clocks from dal configaration - * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks); - */ - /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */ - - /* get level->DeepSleepDivId - if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) - level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR); - */ - PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0); - for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { - temp = clock >> i; - - if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0) - break; - } - - level->DeepSleepDivId = i; - - /* Default to slow, highest DPM level will be - * set to PPSMC_DISPLAY_WATERMARK_LOW later. - */ - if (data->update_up_hyst) - level->UpHyst = (uint8_t)data->up_hyst; - if (data->update_down_hyst) - level->DownHyst = (uint8_t)data->down_hyst; - - level->SclkSetting = curr_sclk_setting; - - CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); - CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate); - return 0; -} - -/** -* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; - int result = 0; - uint32_t array = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * - SMU74_MAX_LEVELS_GRAPHICS; - struct SMU74_Discrete_GraphicsLevel *levels = - data->smc_state_table.GraphicsLevel; - uint32_t i, max_entry; - uint8_t hightest_pcie_level_enabled = 0, - lowest_pcie_level_enabled = 0, - mid_pcie_level_enabled = 0, - count = 0; - - polaris10_get_sclk_range_table(hwmgr); - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - - result = polaris10_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &(data->smc_state_table.GraphicsLevel[i])); - if (result) - return result; - - /* Making sure 
only DPM level 0-1 have Deep Sleep Div ID populated. */ - if (i > 1) - levels[i].DeepSleepDivId = 0; - } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SPLLShutdownSupport)) - data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0; - - data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - - if (pcie_table != NULL) { - PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), - "There must be 1 or more PCIE levels defined in PPTable.", - return -EINVAL); - max_entry = pcie_entry_cnt - 1; - for (i = 0; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = - (uint8_t) ((i < max_entry) ? i : max_entry); - } else { - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (hightest_pcie_level_enabled + 1))) != 0)) - hightest_pcie_level_enabled++; - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << lowest_pcie_level_enabled)) == 0)) - lowest_pcie_level_enabled++; - - while ((count < hightest_pcie_level_enabled) && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) - count++; - - mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < - hightest_pcie_level_enabled ? - (lowest_pcie_level_enabled + 1 + count) : - hightest_pcie_level_enabled; - - /* set pcieDpmLevel to hightest_pcie_level_enabled */ - for (i = 2; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = hightest_pcie_level_enabled; - - /* set pcieDpmLevel to lowest_pcie_level_enabled */ - levels[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled */ - levels[1].pcieDpmLevel = mid_pcie_level_enabled; - } - /* level count will send to smc once at init smc table and never change */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (table_info->vdd_dep_on_mclk) { - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, clock, - &mem_level->MinVoltage, &mem_level->MinMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory " - "VDDC voltage dependency table", return result); - } - - mem_level->MclkFrequency = clock; - mem_level->EnabledForThrottle = 1; - mem_level->EnabledForActivity = 0; - mem_level->UpHyst = 0; - mem_level->DownHyst = 100; - mem_level->VoltageDownHyst = 0; - mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - mem_level->StutterEnable = false; - mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - data->display_timing.num_existing_displays = info.display_count; - - if ((data->mclk_stutter_mode_threshold) && - (clock <= data->mclk_stutter_mode_threshold) && - (PHM_READ_FIELD(hwmgr->device, 
DPG_PIPE_STUTTER_CONTROL, - STUTTER_ENABLE) & 0x1)) - mem_level->StutterEnable = true; - - if (!result) { - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); - } - return result; -} - -/** -* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t array = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) * - SMU74_MAX_LEVELS_MEMORY; - struct SMU74_Discrete_MemoryLevel *levels = - data->smc_state_table.MemoryLevel; - uint32_t i; - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", - return -EINVAL); - result = polaris10_populate_single_memory_level(hwmgr, - dpm_table->mclk_table.dpm_levels[i].value, - &levels[i]); - if (i == dpm_table->mclk_table.count - 1) { - levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; - levels[i].EnabledForActivity = 1; - } - if (result) - return result; - } - - /* In order to prevent MC activity from stutter mode to push DPM up, - * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not affected by - * up threshold or and MCLK DPM latency. - */ - levels[0].ActivityLevel = 0x1f; - CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = - (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - - /* level count will send to smc once at init smc table and never change */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -/** -* Populates the SMC MVDD structure using the provided memory clock. -* -* @param hwmgr the address of the hardware manager -* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
-* @param voltage the SMC VOLTAGE structure to be populated -*/ -static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr, - uint32_t mclk, SMIO_Pattern *smio_pat) -{ - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i = 0; - - if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) { - /* find mvdd value which clock is more than request */ - for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { - if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { - smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; - break; - } - } - PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, - "MVDD Voltage is outside the supported range.", - return -EINVAL); - } else - return -EINVAL; - - return 0; -} - -static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = 0; - uint32_t sclk_frequency; - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - SMIO_Pattern vol_level; - uint32_t mvdd; - uint16_t us_mvdd; - - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - - /* Get MinVoltage and Frequency from DPM0, - * already converted to SMC_UL */ - sclk_frequency = data->vbios_boot_state.sclk_bootup_value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, - sclk_frequency, - &table->ACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDC voltage value " - "in Clock Dependency Table", - ); - - - result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); - PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); - - table->ACPILevel.DeepSleepDivId = 0; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); - - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); - - - /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, - table->MemoryACPILevel.MclkFrequency, - &table->MemoryACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDCI voltage value " - "in Clock Dependency Table", - ); - - us_mvdd = 0; - if ((POLARIS10_VOLTAGE_CONTROL_NONE == 
data->mvdd_control) || - (data->mclk_dpm_key_disabled)) - us_mvdd = data->vbios_boot_state.mvdd_bootup_value; - else { - if (!polaris10_populate_mvdd_value(hwmgr, - data->dpm_table.mclk_table.dpm_levels[0].value, - &vol_level)) - us_mvdd = vol_level.Voltage; - } - - if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level)) - table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); - else - table->MemoryACPILevel.MinMvdd = 0; - - table->MemoryACPILevel.StutterEnable = false; - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpHyst = 0; - table->MemoryACPILevel.DownHyst = 100; - table->MemoryACPILevel.VoltageDownHyst = 0; - table->MemoryACPILevel.ActivityLevel = - PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); - - CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); - - return result; -} - -static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->VceLevelCount = (uint8_t)(mm_table->count); - table->VceBootLevel = 0; - - for (count = 0; count < table->VceLevelCount; count++) { - table->VceLevel[count].Frequency = mm_table->entries[count].eclk; - table->VceLevel[count].MinVoltage = 0; - table->VceLevel[count].MinVoltage |= - (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - - table->VceLevel[count].MinVoltage |= - (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /*retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->VceLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for VCE engine clock", - return result); - - table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); - } - return result; -} - -static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ -
table->SamuLevel[count].MinVoltage = 0; - table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - -static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, - int32_t eng_clock, int32_t mem_clock, - SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs) -{ - uint32_t dram_timing; - uint32_t dram_timing2; - uint32_t burst_time; - int result; - - result = atomctrl_set_engine_dram_timings_rv770(hwmgr, - eng_clock, mem_clock); - PP_ASSERT_WITH_CODE(result == 0, - "Error calling VBIOS to set DRAM_TIMING.", return result); - - dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - - - arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); - arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); - arb_regs->McArbBurstTime = (uint8_t)burst_time; - - return 0; -} - -static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct SMU74_Discrete_MCArbDramTimingTable arb_regs; - uint32_t i, j; - int result = 0; - - for (i = 0; i < data->dpm_table.sclk_table.count; i++) { - for (j = 0; j < data->dpm_table.mclk_table.count; j++) { - result = polaris10_populate_memory_timing_parameters(hwmgr, - data->dpm_table.sclk_table.dpm_levels[i].value, - data->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - if (result == 0) - result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j); - if (result != 0) - return result; - } - } - - result = polaris10_copy_bytes_to_smc( - hwmgr->smumgr, - data->arb_table_start, - (uint8_t *)&arb_regs, - sizeof(SMU74_Discrete_MCArbDramTimingTable), - data->sram_end); - return result; -} - -static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->UvdLevelCount = 
(uint8_t)(mm_table->count); - table->UvdBootLevel = 0; - - for (count = 0; count < table->UvdLevelCount; count++) { - table->UvdLevel[count].MinVoltage = 0; - table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; - table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; - table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].VclkFrequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Vclk clock", return result); - - table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; - - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].DclkFrequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Dclk clock", return result); - - table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); - } - - return result; -} - -static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - int result = 0; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - table->GraphicsBootLevel = 0; - table->MemoryBootLevel = 0; - - /* find boot level from dpm table */ - result = phm_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(table->GraphicsBootLevel)); - - result = phm_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(table->MemoryBootLevel)); - - table->BootVddc = data->vbios_boot_state.vddc_bootup_value * - VOLTAGE_SCALE; - table->BootVddci = data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE; - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); - CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); - - return 0; -} - - -static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint8_t count, level; - - count = (uint8_t)(table_info->vdd_dep_on_sclk->count); - - for (level = 0; level < count; level++) { - if (table_info->vdd_dep_on_sclk->entries[level].clk >= - data->vbios_boot_state.sclk_bootup_value) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t)(table_info->vdd_dep_on_mclk->count); - for (level = 0; level < count; level++) { - if (table_info->vdd_dep_on_mclk->entries[level].clk >= - 
data->vbios_boot_state.mclk_bootup_value) { - data->smc_state_table.MemoryBootLevel = level; - break; - } - } - - return 0; -} - -static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) -{ - uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t i, stretch_amount, volt_offset = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - - stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; - - /* Read SMU_Eefuse to read and calculate RO and determine - * if the part is SS or FF. if RO >= 1660MHz, part is FF. - */ - efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (67 * 4)); - efuse &= 0xFF000000; - efuse = efuse >> 24; - - if (hwmgr->chip_id == CHIP_POLARIS10) { - min = 1000; - max = 2300; - } else { - min = 1100; - max = 2100; - } - - ro = efuse * (max -min)/255 + min; - - /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ - for (i = 0; i < sclk_table->count; i++) { - data->smc_state_table.Sclk_CKS_masterEn0_7 |= - sclk_table->entries[i].cks_enable << i; - if (hwmgr->chip_id == CHIP_POLARIS10) { - volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \ - (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); - volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ - (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); - } else { - volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \ - (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); - volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ - (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); - } - - if (volt_without_cks >= volt_with_cks) - volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + - sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); - - data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; - } - - data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; - /* Populate CKS Lookup Table */ - if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 && - stretch_amount != 4 && stretch_amount != 5) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - PP_ASSERT_WITH_CODE(false, - "Stretch Amount in PPTable not supported\n", - return -EINVAL); - } - - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); - value &= 0xFFFFFFFE; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); - - return 0; -} - -/** -* Populates the SMC VRConfig field in DPM table. 
-* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint16_t config; - - config = VR_MERGED_WITH_VDDC; - table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); - - /* Set Vddc Voltage Controller */ - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_1; - table->VRConfig |= config; - } else { - PP_ASSERT_WITH_CODE(false, - "VDDC should be on SVI2 control in merged mode!", - ); - } - /* Set Vddci Voltage Controller */ - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - config = VR_SVI2_PLANE_2; /* only in merged mode */ - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - config = VR_SMIO_PATTERN_1; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } - /* Set Mvdd Voltage Controller */ - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - config = VR_SVI2_PLANE_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + - offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } - - return 0; -} - - -static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - int result = 0; - struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; - AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; - AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; - uint32_t tmp, i; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - - - if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) - return result; - - result = atomctrl_get_avfs_information(hwmgr, &avfs_params); - - if (0 == result) { - table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); - table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); - table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); - table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); - table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); - table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); - table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); - table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); - table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); - table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; - table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; - table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); 
- table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); - table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); - table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; - table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; - table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); - AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); - AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); - AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); - AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); - AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); - AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); - AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); - - for (i = 0; i < NUM_VFT_COLUMNS; i++) { - AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); - AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); - } - - result = polaris10_read_smc_sram_dword(smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), - &tmp, data->sram_end); - - polaris10_copy_bytes_to_smc(smumgr, - tmp, - (uint8_t *)&AVFS_meanNsigma, - sizeof(AVFS_meanNsigma_t), - data->sram_end); - - result = polaris10_read_smc_sram_dword(smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), - &tmp, data->sram_end); - polaris10_copy_bytes_to_smc(smumgr, - tmp, - (uint8_t *)&AVFS_SclkOffset, - sizeof(AVFS_Sclk_Offset_t), - data->sram_end); - - data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | - (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | - (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | - (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); - data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; - } - return result; -} - - -/** -* Initializes the SMC table and uploads it -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - const struct polaris10_ulv_parm *ulv = &(data->ulv); - uint8_t i; - struct pp_atomctrl_gpio_pin_assignment gpio_pin; - pp_atomctrl_clock_dividers_vi dividers; - - result = polaris10_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result); - - if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control) - polaris10_populate_smc_voltage_tables(hwmgr, table); - - table->SystemFlags = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - - if (data->is_memory_gddr5) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - - if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) { - result = polaris10_populate_ulv_state(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT); - } - - result = polaris10_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result); - - result = polaris10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result); - - result = polaris10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Memory Level!", return result); - - result = polaris10_populate_smc_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result); - - result = polaris10_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result); - - result = polaris10_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - - /* Since only the initial state is completely set up at this point - * (the other states are just copies of the boot state) we only - * need to populate the ARB settings for the initial state. 
- */ - result = polaris10_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result); - - result = polaris10_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result); - - result = polaris10_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot Level!", return result); - - result = polaris10_populate_smc_initailial_state(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot State!", return result); - - result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate BAPM Parameters!", return result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - result = polaris10_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate Clock Stretcher Data Table!", - return result); - } - - result = polaris10_populate_avfs_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); - - table->CurrSclkPllRange = 0xff; - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - table_info->cac_dtp_table->usTargetOperatingTemp * - POLARIS10_Q88_FORMAT_CONVERSION_UNIT; - table->TemperatureLimitLow = - (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * - POLARIS10_Q88_FORMAT_CONVERSION_UNIT; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; - table->PCIeGenInterval = 1; - table->VRConfig = 0; - - result = polaris10_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate VRConfig setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { - table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; - } else { - table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin)) { - table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - /* Thermal Output GPIO */ - if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, - &gpio_pin)) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; - - /* For porlarity read GPIOPAD_A with assigned Gpio pin - * since VBIOS will program this register to set 'inactive state', - * driver can then determine 'active state' from this and - * program SMU with correct polarity - */ - table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) - & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 
1:0;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-		/* if required, combine VRHot/PCC with thermal out GPIO */
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
-		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
-			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-	} else {
-		table->ThermOutGpio = 17;
-		table->ThermOutPolarity = 1;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-	}
-
-	/* Populate BIF_SCLK levels into SMC DPM table */
-	for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
-		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
-		if (i == 0)
-			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-		else
-			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-	}
-
-	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
-		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
-			data->dpm_table_start +
-			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
-			(uint8_t *)&(table->SystemFlags),
-			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
-			data->sram_end);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to upload dpm data to SMC memory!", return result);
-
-	return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t tmp;
-	int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
- */ - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, &tmp, data->sram_end); - - if (result) - return result; - - tmp &= 0x00FFFFFF; - tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - - return polaris10_write_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, tmp, data->sram_end); -} - -static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableVRHotGPIOInterrupt); - - return 0; -} - -static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - SCLK_PWRMGT_OFF, 0); - return 0; -} - -static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); - - return 0; -} - -static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); - - return 0; -} - -static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) - PP_ASSERT_WITH_CODE(false, - "Attempt to enable Master Deep Sleep switch failed!", - return -1); - } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t soft_register_value = 0; - uint32_t handshake_disables_offset = data->soft_regs_start - + offsetof(SMU74_SoftRegisters, HandshakeDisables); - - /* enable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - - /* enable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { -/* Disable UVD - SMU handshake for MCLK. 
*/ - soft_register_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, handshake_disables_offset); - soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - handshake_disables_offset, soft_register_value); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); - udelay(10); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); - } - - return 0; -} - -static int polaris10_start_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /*enable general power management */ - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 1); - - /* enable sclk deep sleep */ - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 1); - - /* prepare for PCIE DPM */ - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + offsetof(SMU74_SoftRegisters, - VoltageChangeTimeout), 0x1000); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - SWRST_COMMAND_1, RESETLC, 0x0); -/* - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); -*/ - - if (polaris10_enable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); - return -1; - } - - /* enable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start Function!", - return -1); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableACDCGPIOInterrupt)), - "Failed to enable AC DC GPIO Interrupt!", - ); - } - - return 0; -} - -static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* disable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable) == 0), - "Failed to disable SCLK DPM!", - return -1); - - /* disable MCLK dpm */ - if (!data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable) == 0), - "Failed to disable MCLK DPM!", - return -1); - } - - return 0; -} - -static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* disable general power management */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 0); - /* disable sclk deep sleep */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 
SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 0); - - /* disable PCIE dpm */ - if (!data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Disable) == 0), - "Failed to disable pcie DPM during DPM Stop Function!", - return -1); - } - - if (polaris10_disable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); - return -1; - } - - return 0; -} - -static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) -{ - bool protection; - enum DPM_EVENT_SRC src; - - switch (sources) { - default: - printk(KERN_ERR "Unknown throttling event sources."); - /* fall through */ - case 0: - protection = false; - /* src is unused */ - break; - case (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL; - break; - case (1 << PHM_AutoThrottleSource_External): - protection = true; - src = DPM_EVENT_SRC_EXTERNAL; - break; - case (1 << PHM_AutoThrottleSource_External) | - (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; - break; - } - /* Order matters - don't enable thermal protection for the wrong source. */ - if (protection) { - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, - DPM_EVENT_SRC, src); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, - !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)); - } else - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, 1); -} - -static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!(data->active_auto_throttle_sources & (1 << source))) { - data->active_auto_throttle_sources |= 1 << source; - polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->active_auto_throttle_sources & (1 << source)) { - data->active_auto_throttle_sources &= ~(1 << source); - polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - data->pcie_performance_request = true; - - return 0; -} - -static int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 
0 : -1; - PP_ASSERT_WITH_CODE(result == 0, - "DPM is already running right now, no need to enable DPM!", - return 0); - - if (polaris10_voltage_control(hwmgr)) { - tmp_result = polaris10_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE(tmp_result == 0, - "Failed to enable voltage control!", - result = tmp_result); - - tmp_result = polaris10_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", - result = tmp_result); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); - - tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", - result = tmp_result); - - tmp_result = polaris10_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", result = tmp_result); - - tmp_result = polaris10_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", result = tmp_result); - - tmp_result = polaris10_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", result = tmp_result); - - tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", - result = tmp_result); - - tmp_result = polaris10_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", result = tmp_result); - - tmp_result = polaris10_init_arb_table_index(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize ARB table index!", result = tmp_result); - - tmp_result = polaris10_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate PM fuses!", result = tmp_result); - - tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); - - tmp_result = polaris10_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", result = tmp_result); - - tmp_result = polaris10_enable_smc_voltage_controller(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable voltage control!", result = tmp_result); - - tmp_result = polaris10_enable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ULV!", result = tmp_result); - - tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable deep sleep master switch!", result = tmp_result); - - tmp_result = polaris10_enable_didt_config(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to enable deep sleep master switch!", result = tmp_result); - - tmp_result = polaris10_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", result = tmp_result); - - tmp_result = polaris10_enable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SMC CAC!", result = 
tmp_result); - - tmp_result = polaris10_enable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable power containment!", result = tmp_result); - - tmp_result = polaris10_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to power control set level!", result = tmp_result); - - tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable thermal auto throttle!", result = tmp_result); - - tmp_result = polaris10_pcie_performance_request(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "pcie performance request failed!", result = tmp_result); - - return result; -} - -int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1; - PP_ASSERT_WITH_CODE(tmp_result == 0, - "DPM is not running right now, no need to disable DPM!", - return 0); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); - - tmp_result = polaris10_disable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable power containment!", result = tmp_result); - - tmp_result = polaris10_disable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable SMC CAC!", result = tmp_result); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); - - tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable thermal auto throttle!", result = tmp_result); - - tmp_result = polaris10_stop_dpm(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to stop DPM!", result = tmp_result); - - tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable deep sleep master switch!", result = tmp_result); - - tmp_result = polaris10_disable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable ULV!", result = tmp_result); - - tmp_result = polaris10_clear_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to clear voting clients!", result = tmp_result); - - tmp_result = polaris10_reset_to_default(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to reset to default!", result = tmp_result); - - tmp_result = polaris10_force_switch_to_arbf0(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to force to switch arbf0!", result = tmp_result); - - return result; -} - -int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) -{ - - return 0; -} - -static int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -static int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - - if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - 
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPowerManagement); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMC); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); - - /* power tune caps Assume disabled */ - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SQRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM); - - if (hwmgr->chip_id == CHIP_POLARIS11) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SPLLShutdownSupport); - return 0; -} - -static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - polaris10_initialize_power_tune_defaults(hwmgr); - - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; -} - -/** -* Get Leakage VDDC based on leakage ID. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint16_t vv_id; - uint32_t vddc = 0; - uint16_t i, j; - uint32_t sclk = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - int result; - - for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) { - vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - if (!phm_get_sclk_for_voltage_evv(hwmgr, - table_info->vddc_lookup_table, vv_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - for (j = 1; j < sclk_table->count; j++) { - if (sclk_table->entries[j].clk == sclk && - sclk_table->entries[j].cks_enable == 0) { - sclk += 5000; - break; - } - } - } - - if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, - VOLTAGE_TYPE_VDDC, - sclk, vv_id, &vddc) != 0) { - printk(KERN_WARNING "failed to retrieving EVV voltage!\n"); - continue; - } - - /* need to make sure vddc is less than 2V or else, it could burn the ASIC. - * real voltage level in unit of 0.01mV */ - PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0), - "Invalid VDDC value", result = -EINVAL;); - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddc != 0 && vddc != vv_id) { - data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100); - data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; - data->vddc_leakage.count++; - } - } - } - - return 0; -} - -/** - * Change virtual leakage voltage to actual value. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pointer to changing voltage - * @param pointer to leakage table - */ -static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, - uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table) -{ - uint32_t index; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (index = 0; index < leakage_table->count; index++) { - /* if this voltage matches a leakage voltage ID */ - /* patch with actual leakage voltage */ - if (leakage_table->leakage_id[index] == *voltage) { - *voltage = leakage_table->actual_voltage[index]; - break; - } - } - - if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); -} - -/** -* Patch voltage lookup table by EVV leakages. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pointer to voltage lookup table -* @param pointer to leakage table -* @return always 0 -*/ -static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - struct polaris10_leakage_voltage *leakage_table) -{ - uint32_t i; - - for (i = 0; i < lookup_table->count; i++) - polaris10_patch_with_vdd_leakage(hwmgr, - &lookup_table->entries[i].us_vdd, leakage_table); - - return 0; -} - -static int polaris10_patch_clock_voltage_limits_with_vddc_leakage( - struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table, - uint16_t *vddc) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); - hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = - table_info->max_clock_voltage_on_dc.vddc; - return 0; -} - -static int polaris10_patch_voltage_dependency_tables_with_lookup_table( - struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = - table_info->vdd_dep_on_mclk; - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - voltageId = mclk_table->entries[entryId].vddInd; - mclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mm_table->count; ++entryId) { - voltageId = mm_table->entries[entryId].vddcInd; - mm_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - return 0; - -} - -static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage. */ - return 0; -} - -static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage from mm table. 
*/ - return 0; -} - -static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; - table_size = lookup_table->count; - - PP_ASSERT_WITH_CODE(0 != lookup_table->count, - "Lookup table is empty", return -EINVAL); - - /* Sorting voltages */ - for (i = 0; i < table_size - 1; i++) { - for (j = i + 1; j > 0; j--) { - if (lookup_table->entries[j].us_vdd < - lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; - } - } - } - - return 0; -} - -static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr) -{ - int result = 0; - int tmp_result; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr, - table_info->vddc_lookup_table, &(data->vddc_leakage)); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, - &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); - if (tmp_result) - result = tmp_result; - - return result; -} - -static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, - "VDD dependency on SCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, - "VDD dependency on SCLK table has to have is missing. \ - This table is mandatory", return -EINVAL); - - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, - "VDD dependency on MCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
\ - This table is mandatory", return -EINVAL); - - table_info->max_clock_voltage_on_ac.sclk = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.mclk = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.vddc = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - table_info->max_clock_voltage_on_ac.vddci = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =table_info->max_clock_voltage_on_ac.vddci; - - return 0; -} - -static int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - struct phm_ppt_v1_voltage_lookup_table *lookup_table = - table_info->vddc_lookup_table; - uint32_t i; - uint32_t hw_revision, sub_vendor_id, sub_sys_id; - struct cgs_system_info sys_info = {0}; - - sys_info.size = sizeof(struct cgs_system_info); - - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; - cgs_query_system_info(hwmgr->device, &sys_info); - hw_revision = (uint32_t)sys_info.value; - - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID; - cgs_query_system_info(hwmgr->device, &sys_info); - sub_sys_id = (uint32_t)sys_info.value; - - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID; - cgs_query_system_info(hwmgr->device, &sys_info); - sub_vendor_id = (uint32_t)sys_info.value; - - if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && - ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || - (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || - (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { - if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) - return 0; - - for (i = 0; i < lookup_table->count; i++) { - if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { - dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; - return 0; - } - } - } - return 0; -} - - -static int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data; - struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - uint32_t temp_reg; - int result; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; - - data->dll_default_on = false; - data->sram_end = SMC_RAM_END; - data->mclk_dpm0_activity_target = 0xa; - data->disable_dpm_mask = 0xFF; - data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT; - data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT; - data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[5] = 
PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT; - - data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7; - - data->vddc_vddci_delta = VDDC_VDDCI_DELTA; - - data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT; - - /* need to set voltage control types before EVV patching */ - data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE; - data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; - data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; - - data->enable_tdc_limit_feature = true; - data->enable_pkg_pwr_tracking_feature = true; - data->force_pcie_gen = PP_PCIEGenInvalid; - data->mclk_stutter_mode_threshold = 40000; - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) - data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) - data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) - data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; - } - - if (table_info->cac_dtp_table->usClockStretchAmount != 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - - polaris10_set_features_platform_caps(hwmgr); - - polaris10_patch_voltage_workaround(hwmgr); - polaris10_init_dpm_defaults(hwmgr); - - /* Get leakage voltage based on leakage ID. */ - result = polaris10_get_evv_voltages(hwmgr); - - if (result) { - printk("Get EVV Voltage Failed. 
Abort Driver loading!\n"); - return -1; - } - - polaris10_complete_dependency_tables(hwmgr); - polaris10_set_private_data_based_on_pptable(hwmgr); - - /* Initalize Dynamic State Adjustment Rule Settings */ - result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - - if (0 == result) { - struct cgs_system_info sys_info = {0}; - - data->is_tlu_enabled = false; - - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - POLARIS10_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { - temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); - switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { - case 0: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); - break; - case 1: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); - break; - case 2: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); - break; - case 3: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); - break; - case 4: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); - break; - default: - PP_ASSERT_WITH_CODE(0, - "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", - ); - break; - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); - } - - if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && - hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { - hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = - (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = - (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = - (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; - - table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? 
- (table_info->cac_dtp_table->usDefaultTargetOperatingTemp -50) : 0; - - table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; - table_info->cac_dtp_table->usOperatingTempStep = 1; - table_info->cac_dtp_table->usOperatingTempHyst = 1; - - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; - - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; - - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = - table_info->cac_dtp_table->usOperatingTempMinLimit; - - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = - table_info->cac_dtp_table->usOperatingTempMaxLimit; - - hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = - table_info->cac_dtp_table->usDefaultTargetOperatingTemp; - - hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = - table_info->cac_dtp_table->usOperatingTempStep; - - hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = - table_info->cac_dtp_table->usTargetOperatingTemp; - } - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - - hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ -/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ - hwmgr->platform_descriptor.clockStep.engineClock = 500; - hwmgr->platform_descriptor.clockStep.memoryClock = 500; - } else { - /* Ignore return value in here, we are cleaning up a mess. 
*/ - polaris10_hwmgr_backend_fini(hwmgr); - } - - return 0; -} - -static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t level, tmp; - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++; - - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, level); - } - } - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - return 0; -} - -static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - phm_apply_dal_min_voltage_request(hwmgr); - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - } - - return 0; -} - -static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!polaris10_is_dpm_running(hwmgr)) - return -EINVAL; - - if (!data->pcie_dpm_key_disabled) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel); - } - - return polaris10_upload_dpm_level_enable_mask(hwmgr); -} - -static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = - (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t level; - - if (!data->sclk_dpm_key_disabled) - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = phm_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = phm_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = phm_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.pcie_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - (level)); - } - } - - return 0; - -} -static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - 
case AMD_DPM_FORCED_LEVEL_HIGH: - ret = polaris10_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = polaris10_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = polaris10_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - - return ret; -} - -static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct polaris10_power_state); -} - - -static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *request_ps, - const struct pp_power_state *current_ps) -{ - - struct polaris10_power_state *polaris10_ps = - cast_phw_polaris10_power_state(&request_ps->hardware); - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == - request_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2, - "VI should always have 2 performance levels", - ); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - /* Cap clock DPM tables at DC MAX if it is in DC. */ - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < polaris10_ps->performance_level_count; i++) { - if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk) - polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk) - polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; - polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - - /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = table_info->vdd_dep_on_sclk->count - 1; - count >= 0; count--) { - if (stable_pstate_sclk >= - table_info->vdd_dep_on_sclk->entries[count].clk) { - stable_pstate_sclk = - table_info->vdd_dep_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - 
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - polaris10_ps->performance_levels[1].engine_clock = - hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - polaris10_ps->performance_levels[1].memory_clock = - hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = polaris10_ps->performance_levels[0].engine_clock; - mclk = polaris10_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? - max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? - max_limits->mclk : minimum_clocks.memoryClock; - - polaris10_ps->performance_levels[0].engine_clock = sclk; - polaris10_ps->performance_levels[0].memory_clock = mclk; - - polaris10_ps->performance_levels[1].engine_clock = - (polaris10_ps->performance_levels[1].engine_clock >= - polaris10_ps->performance_levels[0].engine_clock) ? 
- polaris10_ps->performance_levels[1].engine_clock : - polaris10_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < polaris10_ps->performance_levels[1].memory_clock) - mclk = polaris10_ps->performance_levels[1].memory_clock; - - polaris10_ps->performance_levels[0].memory_clock = mclk; - polaris10_ps->performance_levels[1].memory_clock = mclk; - } else { - if (polaris10_ps->performance_levels[1].memory_clock < - polaris10_ps->performance_levels[0].memory_clock) - polaris10_ps->performance_levels[1].memory_clock = - polaris10_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - for (i = 0; i < polaris10_ps->performance_level_count; i++) { - polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - polaris10_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - return 0; -} - - -static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - if (low) - return polaris10_ps->performance_levels[0].memory_clock; - else - return polaris10_ps->performance_levels - [polaris10_ps->performance_level_count-1].memory_clock; -} - -static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - if (low) - return polaris10_ps->performance_levels[0].engine_clock; - else - return polaris10_ps->performance_levels - [polaris10_ps->performance_level_count-1].engine_clock; -} - -static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. 
*/ - data->vbios_boot_state.sclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = - le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = - phm_get_current_pcie_speed(hwmgr); - - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)phm_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - void *state, struct pp_power_state *power_state, - void *pp_table, uint32_t classification_flag) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_power_state *polaris10_power_state = - (struct polaris10_power_state *)(&(power_state->hardware)); - struct polaris10_performance_level *performance_level; - ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; - ATOM_Tonga_POWERPLAYTABLE *powerplay_table = - (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - PPTable_Generic_SubTable_Header *sclk_dep_table = - (PPTable_Generic_SubTable_Header *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); - - ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = - (ATOM_Tonga_MCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - - /* The following fields are not initialized here: id orderedList allStatesList */ - power_state->classification.ui_label = - (le16_to_cpu(state_entry->usClassification) & - ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> - ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; - power_state->classification.flags = classification_flag; - /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ - - power_state->classification.temporary_state = false; - power_state->classification.to_be_deleted = false; - - power_state->validation.disallowOnDC = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_DISALLOW_ON_DC)); - - power_state->pcie.lanes = 0; - - power_state->display.disableFrameModulation = false; - power_state->display.limitRefreshrate = false; - power_state->display.enableVariBright = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_ENABLE_VARIBRIGHT)); - - power_state->validation.supportedPowerLevels = 0; - power_state->uvd_clocks.VCLK = 0; - power_state->uvd_clocks.DCLK = 0; - power_state->temperatures.min = 0; - power_state->temperatures.max = 0; - - performance_level = &(polaris10_power_state->performance_levels - [polaris10_power_state->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (polaris10_power_state->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - 
"Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged from low to high. */ - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexLow].ulMclk; - if (sclk_dep_table->ucRevId == 0) - performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexLow].ulSclk; - else if (sclk_dep_table->ucRevId == 1) - performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexLow].ulSclk; - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenLow); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - performance_level = &(polaris10_power_state->performance_levels - [polaris10_power_state->performance_level_count++]); - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexHigh].ulMclk; - - if (sclk_dep_table->ucRevId == 0) - performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexHigh].ulSclk; - else if (sclk_dep_table->ucRevId == 1) - performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexHigh].ulSclk; - - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenHigh); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - return 0; -} - -static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *state) -{ - int result; - struct polaris10_power_state *ps; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - state->hardware.magic = PHM_VIslands_Magic; - - ps = (struct polaris10_power_state *)(&state->hardware); - - result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, - polaris10_get_pp_table_entry_callback_func); - - /* This is the earliest time we have all the dependency table and the VBIOS boot state - * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state - * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].vddci != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does not match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!state->validation.disallowOnDC) - ps->dc_compatible = true; - - if (state->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; - - ps->uvd_clks.vclk = state->uvd_clocks.VCLK; - ps->uvd_clks.dclk = state->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (state->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - for (i = 
0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - ps->performance_levels[i].pcie_lane; - if (data->pcie_lane_performance.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static void -polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent; - uint32_t offset; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", - mclk / 100, sclk / 100); - - offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); -} - -static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].engine_clock; - struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].memory_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* TODO: Check SCLK in DAL's minimum clocks - * in case DeepSleep divider update is required. - */ - if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && - (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK || - data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK)) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - - if (i >= mclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr, - const struct polaris10_power_state *polaris10_ps) -{ - uint32_t i; - uint32_t sclk, max_sclk = 0; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - - for (i = 0; i < polaris10_ps->performance_level_count; i++) { - sclk = polaris10_ps->performance_levels[i].engine_clock; - if (max_sclk < sclk) - max_sclk = sclk; - } - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) - return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? 
- dpm_table->pcie_speed_table.dpm_levels - [dpm_table->pcie_speed_table.count - 1].value : - dpm_table->pcie_speed_table.dpm_levels[i].value); - } - - return 0; -} - -static int polaris10_request_link_speed_change_before_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_nps = - cast_const_phw_polaris10_power_state(states->pnew_state); - const struct polaris10_power_state *polaris10_cps = - cast_const_phw_polaris10_power_state(states->pcurrent_state); - - uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - - if (target_link_speed > current_link_speed) { - switch (target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int polaris10_populate_and_upload_sclk_mclk_dpm_levels( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t sclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].engine_clock; - uint32_t mclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].memory_clock; - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - - struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t 
i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { - dpm_table->sclk_table.dpm_levels - [dpm_table->sclk_table.count - 1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count - 1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2; - - for (i = dpm_count; i > 1; i--) { - if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) { - clock_percent = - ((sclk - - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value - ) * 100) - / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value + - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent)/100; - - } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) { - clock_percent = - ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value - - sclk) * 100) - / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value - - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - dpm_table->mclk_table.dpm_levels - [dpm_table->mclk_table.count - 1].value = mclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->mclk_table.count < 2 ? 
0 : dpm_table->mclk_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) { - clock_percent = ((mclk - - golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100) - / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value + - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - - } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ( - (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk) - * 100) - / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value - - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = polaris10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = polaris10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct polaris10_single_dpm_table *dpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) - || (dpm_table->dpm_levels[i].value > high_limit)) - dpm_table->dpm_levels[i].enabled = false; - else - dpm_table->dpm_levels[i].enabled = true; - } - - return 0; -} - -static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, - const struct polaris10_power_state *polaris10_ps) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == polaris10_ps->performance_level_count) ? 
0 : 1;
-
- polaris10_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- polaris10_ps->performance_levels[0].engine_clock,
- polaris10_ps->performance_levels[high_limit_count].engine_clock);
-
- polaris10_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- polaris10_ps->performance_levels[0].memory_clock,
- polaris10_ps->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int polaris10_generate_dpm_level_enable_mask(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
-
- result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
- if (result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-static int
-polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_UVDDPM_Enable :
- PPSMC_MSG_UVDDPM_Disable);
-}
-
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable?
- PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
-}
-
-static int
-polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable?
- PPSMC_MSG_SAMUDPM_Enable : - PPSMC_MSG_SAMUDPM_Disable); -} - -int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.UvdBootLevel = 0; - if (table_info->mm_dep_table->count > 0) - data->smc_state_table.UvdBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0x00FFFFFF; - mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDPM) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); - } - - return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate); -} - -int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - data->smc_state_table.VceBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - else - data->smc_state_table.VceBootLevel = 0; - - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, VceBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFF00FFFF; - mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << data->smc_state_table.VceBootLevel); - } - - polaris10_enable_disable_vce_dpm(hwmgr, !bgate); - - return 0; -} - -int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - if (!bgate) { - data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.SamuBootLevel)); - } - - return polaris10_enable_disable_samu_dpm(hwmgr, !bgate); -} - -static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != - data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = - hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = - data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = polaris10_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end); - } - - return result; -} - -static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return polaris10_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -static int polaris10_notify_link_speed_change_after_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps); - uint8_t request; - - if (data->pspp_notify_required) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if (request == PCIE_PERF_REQ_GEN1 && - phm_get_current_pcie_speed(hwmgr) > 0) - return 0; - - if (acpi_pcie_perf_request(hwmgr->device, request, false)) { - if 
(PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - return 0; -} - -static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); - return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; -} - - - -static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) -{ - int tmp_result, result = 0; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to find DPM states clocks in DPM table!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - polaris10_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to request link speed change before state change!", - result = tmp_result); - } - - tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate and upload SCLK MCLK DPM levels!", - result = tmp_result); - - tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to generate DPM level enabled mask!", - result = tmp_result); - - tmp_result = polaris10_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update SCLK threshold!", - result = tmp_result); - - tmp_result = polaris10_program_mem_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program memory timing parameters!", - result = tmp_result); - - tmp_result = polaris10_notify_smc_display(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify smc display settings!", - result = tmp_result); - - tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to unfreeze SCLK MCLK DPM!", - result = tmp_result); - - tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to upload DPM level enabled mask!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - polaris10_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify link speed change after state change!", - result = tmp_result); - } - data->apply_optimized_settings = false; - return result; -} - -static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); -} - - -static int -polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) -{ - PPSMC_Msg msg = has_display ? 
(PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; -} - -static int -polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) -{ - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ - polaris10_notify_smc_display_change(hwmgr, false); - - - return 0; -} - -/** -* Programs the display gap -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always OK -*/ -static int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if (0 == refresh_rate) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - data->frame_time_x2 = frame_time_in_us * 2 / 100; - - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - - - return 0; -} - - -static int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - return polaris10_program_display_gap(hwmgr); -} - -/** -* Set maximum target operating fan output RPM -* -* @param hwmgr: the address of the powerplay hardware manager. -* @param usMaxFanRpm: max operating fan RPM value. -* @return The response that came from the SMC. -*/ -static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) -{ - hwmgr->thermal_controller. 
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); -} - -static int -polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - return 0; -} - -static bool polaris10_check_smc_update_required_for_display_configuration( - struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; -/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL - if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - cgs_get_min_clock_settings(hwmgr->device, &min_clocks); - if (min_clocks.engineClockInSR != data->display_timing.minClockInSR && - (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK || - data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK)) - is_update_required = true; -*/ - return is_update_required; -} - -static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1, - const struct polaris10_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -static int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, - const struct pp_hw_power_state *pstate1, - const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1); - const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2); - int i; - - if (pstate1 == NULL || pstate2 == NULL || equal == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); - *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - - return 0; -} - -static int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - uint32_t vbios_version; - - /* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/ - - phm_get_mc_microcode_version(hwmgr); - vbios_version = hwmgr->microcode_version_info.MC & 0xf; - /* Full version of MC ucode has already been loaded. 
*/
- if (vbios_version == 0) {
- data->need_long_memory_training = false;
- return 0;
- }
-
- data->need_long_memory_training = false;
-
-/*
- * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
- pfd = &tonga_mcmeFirmware;
- if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
- polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
- pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
- pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
-*/
- return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
- & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
- & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
- & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0 - */ -static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - - return 0; -} - -static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - -static int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - polaris10_upload_mc_firmware(hwmgr); - - tmp_result = polaris10_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = polaris10_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = polaris10_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = polaris10_init_power_gate_state(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init power gate state!", result = tmp_result); - - tmp_result = phm_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = polaris10_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - case PP_PCIE: - { - uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speedCntl = 0; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speedCntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, 
PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_PCIE: - pcie_speed = polaris10_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? "*" : ""); - break; - default: - break; - } - return size; -} - -static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - polaris10_fan_ctrl_stop_smc_fan_control(hwmgr); - polaris10_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct polaris10_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr 
*)(hwmgr->backend); - struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct polaris10_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} -static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { - .backend_init = &polaris10_hwmgr_backend_init, - .backend_fini = &polaris10_hwmgr_backend_fini, - .asic_setup = &polaris10_setup_asic_task, - .dynamic_state_management_enable = &polaris10_enable_dpm_tasks, - .apply_state_adjust_rules = polaris10_apply_state_adjust_rules, - .force_dpm_level = &polaris10_force_dpm_level, - .power_state_set = polaris10_set_power_state_tasks, - .get_power_state_size = polaris10_get_power_state_size, - .get_mclk = polaris10_dpm_get_mclk, - .get_sclk = polaris10_dpm_get_sclk, - .patch_boot_state = polaris10_dpm_patch_boot_state, - .get_pp_table_entry = polaris10_get_pp_table_entry, - .get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0, - .print_current_perforce_level = polaris10_print_current_perforce_level, - .powerdown_uvd = polaris10_phm_powerdown_uvd, - .powergate_uvd = polaris10_phm_powergate_uvd, - .powergate_vce = polaris10_phm_powergate_vce, - .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating, - .update_clock_gatings = polaris10_phm_update_clock_gatings, - .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = polaris10_display_configuration_changed_task, - .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output, - .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output, - .get_temperature = polaris10_thermal_get_temperature, - .stop_thermal_controller = polaris10_thermal_stop_thermal_controller, - .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt, - .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration, - .check_states_equal = polaris10_check_states_equal, - .set_fan_control_mode = 
polaris10_set_fan_control_mode, - .get_fan_control_mode = polaris10_get_fan_control_mode, - .force_clock_level = polaris10_force_clock_level, - .print_clock_levels = polaris10_print_clock_levels, - .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating, - .get_sclk_od = polaris10_get_sclk_od, - .set_sclk_od = polaris10_set_sclk_od, - .get_mclk_od = polaris10_get_mclk_od, - .set_mclk_od = polaris10_set_mclk_od, -}; - -int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - hwmgr->hwmgr_func = &polaris10_hwmgr_funcs; - hwmgr->pptable_func = &pptable_v1_0_funcs; - pp_polaris10_thermal_initialize(hwmgr); - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h deleted file mode 100644 index 378ab342c257..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef POLARIS10_HWMGR_H -#define POLARIS10_HWMGR_H - -#include "hwmgr.h" -#include "smu74.h" -#include "smu74_discrete.h" -#include "ppatomctrl.h" -#include "polaris10_ppsmc.h" -#include "polaris10_powertune.h" -#include "polaris10_smumgr.h" - -#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2 - -#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0 -#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2 -#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3 - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -struct polaris10_performance_level { - uint32_t memory_clock; - uint32_t engine_clock; - uint16_t pcie_gen; - uint16_t pcie_lane; -}; - -struct polaris10_uvd_clocks { - uint32_t vclk; - uint32_t dclk; -}; - -struct polaris10_vce_clocks { - uint32_t evclk; - uint32_t ecclk; -}; - -struct polaris10_power_state { - uint32_t magic; - struct polaris10_uvd_clocks uvd_clks; - struct polaris10_vce_clocks vce_clks; - uint32_t sam_clk; - uint16_t performance_level_count; - bool dc_compatible; - uint32_t sclk_threshold; - struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS]; -}; - -struct polaris10_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; - -#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500 - -struct polaris10_single_dpm_table { - uint32_t count; - struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct polaris10_dpm_table { - struct polaris10_single_dpm_table sclk_table; - struct polaris10_single_dpm_table mclk_table; - struct polaris10_single_dpm_table pcie_speed_table; - struct polaris10_single_dpm_table vddc_table; - struct polaris10_single_dpm_table vddci_table; - struct polaris10_single_dpm_table mvdd_table; -}; - -struct polaris10_clock_registers { - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t vDLL_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_SS1; - uint32_t vMPLL_SS2; -}; - -#define DISABLE_MC_LOADMICROCODE 1 -#define DISABLE_MC_CFGPROGRAMMING 2 - -struct polaris10_voltage_smio_registers { - uint32_t vS0_VID_LOWER_SMIO_CNTL; -}; - -#define POLARIS10_MAX_LEAKAGE_COUNT 8 - -struct polaris10_leakage_voltage { - uint16_t count; - uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT]; -}; - -struct polaris10_vbios_boot_state { - uint16_t mvdd_bootup_value; - uint16_t vddc_bootup_value; - uint16_t vddci_bootup_value; - uint32_t sclk_bootup_value; - uint32_t mclk_bootup_value; - uint16_t pcie_gen_bootup_value; - uint16_t pcie_lane_bootup_value; -}; - -/* Ultra Low Voltage parameter structure */ -struct polaris10_ulv_parm { - bool ulv_supported; - uint32_t cg_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct polaris10_performance_level ulv_power_level; -}; - -struct polaris10_display_timing { - uint32_t min_clock_in_sr; - uint32_t num_existing_displays; -}; - -struct polaris10_dpmlevel_enable_mask { - uint32_t uvd_dpm_enable_mask; - uint32_t vce_dpm_enable_mask; - uint32_t acp_dpm_enable_mask; - uint32_t 
samu_dpm_enable_mask; - uint32_t sclk_dpm_enable_mask; - uint32_t mclk_dpm_enable_mask; - uint32_t pcie_dpm_enable_mask; -}; - -struct polaris10_pcie_perf_range { - uint16_t max; - uint16_t min; -}; - -struct polaris10_hwmgr { - struct polaris10_dpm_table dpm_table; - struct polaris10_dpm_table golden_dpm_table; - SMU74_Discrete_DpmTable smc_state_table; - struct SMU74_Discrete_Ulv ulv_setting; - - struct polaris10_range_table range_table[NUM_SCLK_RANGE]; - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; - uint32_t static_screen_threshold_unit; - uint32_t static_screen_threshold; - uint32_t voltage_control; - uint32_t vddc_vddci_delta; - - uint32_t active_auto_throttle_sources; - - struct polaris10_clock_registers clock_registers; - struct polaris10_voltage_smio_registers voltage_smio_registers; - - bool is_memory_gddr5; - uint16_t acpi_vddc; - bool pspp_notify_required; - uint16_t force_pcie_gen; - uint16_t acpi_pcie_gen; - uint32_t pcie_gen_cap; - uint32_t pcie_lane_cap; - uint32_t pcie_spc_cap; - struct polaris10_leakage_voltage vddc_leakage; - struct polaris10_leakage_voltage Vddci_leakage; - - uint32_t mvdd_control; - uint32_t vddc_mask_low; - uint32_t mvdd_mask_low; - uint16_t max_vddc_in_pptable; - uint16_t min_vddc_in_pptable; - uint16_t max_vddci_in_pptable; - uint16_t min_vddci_in_pptable; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edcwr_enable_threshold; - bool is_uvd_enabled; - struct polaris10_vbios_boot_state vbios_boot_state; - - bool pcie_performance_request; - bool battery_state; - bool is_tlu_enabled; - - /* ---- SMC SRAM Address of firmware header tables ---- */ - uint32_t sram_end; - uint32_t dpm_table_start; - uint32_t soft_regs_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; - - /* ---- Stuff originally coming from Evergreen ---- */ - uint32_t vddci_control; - struct pp_atomctrl_voltage_table vddc_voltage_table; - struct pp_atomctrl_voltage_table vddci_voltage_table; - struct pp_atomctrl_voltage_table mvdd_voltage_table; - - uint32_t mgcg_cgtt_local2; - uint32_t mgcg_cgtt_local3; - uint32_t gpio_debug; - uint32_t mc_micro_code_feature; - uint32_t highest_mclk; - uint16_t acpi_vddci; - uint8_t mvdd_high_index; - uint8_t mvdd_low_index; - bool dll_default_on; - bool performance_request_registered; - - /* ---- Low Power Features ---- */ - struct polaris10_ulv_parm ulv; - - /* ---- CAC Stuff ---- */ - uint32_t cac_table_start; - bool cac_configuration_required; - bool driver_calculate_cac_leakage; - bool cac_enabled; - - /* ---- DPM2 Parameters ---- */ - uint32_t power_containment_features; - bool enable_dte_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool disable_uvd_power_tune_feature; - const struct polaris10_pt_defaults *power_tune_defaults; - struct SMU74_Discrete_PmFuses power_tune_table; - uint32_t dte_tj_offset; - uint32_t fast_watermark_threshold; - - /* ---- Phase Shedding ---- */ - bool vddc_phase_shed_control; - - /* ---- DI/DT ---- */ - struct polaris10_display_timing display_timing; - uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; - - /* ---- Thermal Temperature Setting ---- */ - struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask; - uint32_t 
need_update_smu7_dpm_table; - uint32_t sclk_dpm_key_disabled; - uint32_t mclk_dpm_key_disabled; - uint32_t pcie_dpm_key_disabled; - uint32_t min_engine_clocks; - struct polaris10_pcie_perf_range pcie_gen_performance; - struct polaris10_pcie_perf_range pcie_lane_performance; - struct polaris10_pcie_perf_range pcie_gen_power_saving; - struct polaris10_pcie_perf_range pcie_lane_power_saving; - bool use_pcie_performance_levels; - bool use_pcie_power_saving_levels; - uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; - uint32_t mclk_activity_target; - uint32_t mclk_dpm0_activity_target; - uint32_t low_sclk_interrupt_threshold; - uint32_t last_mclk_dpm_enable_mask; - bool uvd_enabled; - - /* ---- Power Gating States ---- */ - bool uvd_power_gated; - bool vce_power_gated; - bool samu_power_gated; - bool need_long_memory_training; - - /* Application power optimization parameters */ - bool update_up_hyst; - bool update_down_hyst; - uint32_t down_hyst; - uint32_t up_hyst; - uint32_t disable_dpm_mask; - bool apply_optimized_settings; - uint32_t avfs_vdroop_override_setting; - bool apply_avfs_cks_off_voltage; - uint32_t frame_time_x2; -}; - -/* To convert to Q8.8 format for firmware */ -#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256 - -enum Polaris10_I2CLineID { - Polaris10_I2CLineID_DDC1 = 0x90, - Polaris10_I2CLineID_DDC2 = 0x91, - Polaris10_I2CLineID_DDC3 = 0x92, - Polaris10_I2CLineID_DDC4 = 0x93, - Polaris10_I2CLineID_DDC5 = 0x94, - Polaris10_I2CLineID_DDC6 = 0x95, - Polaris10_I2CLineID_SCLSDA = 0x96, - Polaris10_I2CLineID_DDCVGA = 0x97 -}; - -#define POLARIS10_I2C_DDC1DATA 0 -#define POLARIS10_I2C_DDC1CLK 1 -#define POLARIS10_I2C_DDC2DATA 2 -#define POLARIS10_I2C_DDC2CLK 3 -#define POLARIS10_I2C_DDC3DATA 4 -#define POLARIS10_I2C_DDC3CLK 5 -#define POLARIS10_I2C_SDA 40 -#define POLARIS10_I2C_SCL 41 -#define POLARIS10_I2C_DDC4DATA 65 -#define POLARIS10_I2C_DDC4CLK 66 -#define POLARIS10_I2C_DDC5DATA 0x48 -#define POLARIS10_I2C_DDC5CLK 0x49 -#define POLARIS10_I2C_DDC6DATA 0x4a -#define POLARIS10_I2C_DDC6CLK 0x4b -#define POLARIS10_I2C_DDCVGADATA 0x4c -#define POLARIS10_I2C_DDCVGACLK 0x4d - -#define POLARIS10_UNUSED_GPIO_PIN 0x7F - -int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); - -int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); -int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate); -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c deleted file mode 100644 index b9cb240a135d..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "hwmgr.h" -#include "smumgr.h" -#include "polaris10_hwmgr.h" -#include "polaris10_powertune.h" -#include "polaris10_smumgr.h" -#include "smu74_discrete.h" -#include "pp_debug.h" -#include "gca/gfx_8_0_d.h" -#include "gca/gfx_8_0_sh_mask.h" -#include "oss/oss_3_0_sh_mask.h" - -#define VOLTAGE_SCALE 4 -#define POWERTUNE_DEFAULT_SET_MAX 1 - -uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; - -struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = { -/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - * Offset Mask Shift Value Type - * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - */ - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { 0xFFFFFFFF } -}; - -struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = { -/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - * Offset Mask Shift Value Type - * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - */ - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, 
POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { 0xFFFFFFFF } -}; - -struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = { -/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - * Offset Mask Shift Value Type - * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - */ - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, 
DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, 
DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, 
DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { 0xFFFFFFFF } -}; - -struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = { -/* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - * Offset Mask Shift Value Type - * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - */ - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, 
DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { 
ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, 
DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_TUNING_CTRL, 
DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { 0xFFFFFFFF } -}; - -static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { - /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, - * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ - { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, - { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, - { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, -}; - -void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (table_info && - table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && - table_info->cac_dtp_table->usPowerTuneDataSetID) - polaris10_hwmgr->power_tune_defaults = - &polaris10_power_tune_data_set_array - [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; - else - polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0]; - -} - -static uint16_t scale_fan_gain_settings(uint16_t raw_setting) -{ - uint32_t tmp; - tmp = raw_setting * 4096 / 100; - return (uint16_t)tmp; -} - -int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_dtp_table = 
table_info->cac_dtp_table; - struct pp_advance_fan_control_parameters *fan_table= - &hwmgr->thermal_controller.advanceFanControlParameters; - int i, j, k; - const uint16_t *pdef1; - const uint16_t *pdef2; - - dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); - dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); - - PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, - "Target Operating Temp is out of Range!", - ); - - dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( - cac_dtp_table->usTargetOperatingTemp * 256); - dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitHotspot * 256); - dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainEdge)); - dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainHotspot)); - - pdef1 = defaults->BAPMTI_R; - pdef2 = defaults->BAPMTI_RC; - - for (i = 0; i < SMU74_DTE_ITERATIONS; i++) { - for (j = 0; j < SMU74_DTE_SOURCES; j++) { - for (k = 0; k < SMU74_DTE_SINKS; k++) { - dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); - dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); - pdef1++; - pdef2++; - } - } - } - - return 0; -} - -static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - - data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; - data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; - data->power_tune_table.SviLoadLineTrimVddC = 3; - data->power_tune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} - -static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) -{ - uint16_t tdc_limit; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - - tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); - data->power_tune_table.TDC_VDDC_PkgLimit = - CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); - data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - defaults->TDC_VDDC_ThrottleReleaseLimitPerc; - data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; - - return 0; -} - -static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - uint32_t temp; - - if (polaris10_read_smc_sram_dword(hwmgr->smumgr, - fuse_table_offset + - offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), - (uint32_t *)&temp, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", - return -EINVAL); - else { - data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; - data->power_tune_table.LPMLTemperatureMin = - (uint8_t)((temp >> 16) & 0xff); - data->power_tune_table.LPMLTemperatureMax = - (uint8_t)((temp >> 8) & 0xff); - data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); - } - return 0; -} - -static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) -{ - int i; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. 
*/ - for (i = 0; i < 16; i++) - data->power_tune_table.LPMLTemperatureScaler[i] = 0; - - return 0; -} - -static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) - || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) - hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; - - data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US( - hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity); - return 0; -} - -static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) -{ - int i; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.GnbLPML[i] = 0; - - return 0; -} - -static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) -{ - - uint32_t en = enable ? 1 : 0; - int32_t result = 0; - uint32_t data; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); - DIDTBlock_Info &= ~SQ_Enable_MASK; - DIDTBlock_Info |= en << SQ_Enable_SHIFT; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); - DIDTBlock_Info &= ~DB_Enable_MASK; - DIDTBlock_Info |= en << DB_Enable_SHIFT; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); - DIDTBlock_Info &= ~TD_Enable_MASK; - DIDTBlock_Info |= en << TD_Enable_SHIFT; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); - DIDTBlock_Info &= ~TCP_Enable_MASK; - DIDTBlock_Info |= en << TCP_Enable_SHIFT; - } - - if (enable) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info); - - return result; -} - -static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr, - struct polaris10_pt_config_reg *cac_config_regs) -{ - struct 
polaris10_pt_config_reg *config_regs = cac_config_regs; - uint32_t cache = 0; - uint32_t data = 0; - - PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL); - - while (config_regs->offset != 0xFFFFFFFF) { - if (config_regs->type == POLARIS10_CONFIGREG_CACHE) - cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); - else { - switch (config_regs->type) { - case POLARIS10_CONFIGREG_SMC_IND: - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset); - break; - - case POLARIS10_CONFIGREG_DIDT_IND: - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); - break; - - case POLARIS10_CONFIGREG_GC_CAC_IND: - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); - break; - - default: - data = cgs_read_register(hwmgr->device, config_regs->offset); - break; - } - - data &= ~config_regs->mask; - data |= ((config_regs->value << config_regs->shift) & config_regs->mask); - data |= cache; - - switch (config_regs->type) { - case POLARIS10_CONFIGREG_SMC_IND: - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data); - break; - - case POLARIS10_CONFIGREG_DIDT_IND: - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data); - break; - - case POLARIS10_CONFIGREG_GC_CAC_IND: - cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); - break; - - default: - cgs_write_register(hwmgr->device, config_regs->offset, data); - break; - } - cache = 0; - } - - config_regs++; - } - - return 0; -} - -int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr) -{ - int result; - uint32_t num_se = 0; - uint32_t count, value, value2; - struct cgs_system_info sys_info = {0}; - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - - - if (result == 0) - num_se = sys_info.value; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - - /* TO DO Pre DIDT disable clock gating */ - value = 0; - value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); - for (count = 0; count < num_se; count++) { - value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK - | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK - | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value); - - if (hwmgr->chip_id == CHIP_POLARIS10) { - result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - } else if (hwmgr->chip_id == CHIP_POLARIS11) { - result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - } - } - cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, 
value2); - - result = polaris10_enable_didt(hwmgr, true); - PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); - - /* TO DO Post DIDT enable clock gating */ - } - - return 0; -} - -int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) -{ - int result; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - /* TO DO Pre DIDT disable clock gating */ - - result = polaris10_enable_didt(hwmgr, false); - PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result); - /* TO DO Post DIDT enable clock gating */ - } - - return 0; -} - - -static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - - hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); - lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); - - data->power_tune_table.BapmVddCBaseLeakageHiSidd = - CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); - data->power_tune_table.BapmVddCBaseLeakageLoSidd = - CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); - - return 0; -} - -int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t pm_fuse_table_offset; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to get pm_fuse_table_offset Failed!", - return -EINVAL); - - if (polaris10_populate_svi_load_line(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate SviLoadLine Failed!", - return -EINVAL); - - if (polaris10_populate_tdc_limit(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TDCLimit Failed!", return -EINVAL); - - if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TdcWaterfallCtl, " - "LPMLTemperature Min and Max Failed!", - return -EINVAL); - - if (0 != polaris10_populate_temperature_scaler(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate LPMLTemperatureScaler Failed!", - return -EINVAL); - - if (polaris10_populate_fuzzy_fan(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate Fuzzy Fan Control parameters Failed!", - return -EINVAL); - - if (polaris10_populate_gnb_lpml(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Failed!", - return -EINVAL); - - if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - - if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate BapmVddCBaseLeakage Hi and Lo " - "Sidd Failed!", return -EINVAL); - - if 
(polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, - (uint8_t *)&data->power_tune_table, - (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to download PmFuseTable Failed!", - return -EINVAL); - } - return 0; -} - -int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { - int smc_result; - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableCac)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable CAC in SMC.", result = -1); - - data->cac_enabled = (0 == smc_result) ? true : false; - } - return result; -} - -int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC) && data->cac_enabled) { - int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableCac)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable CAC in SMC.", result = -1); - - data->cac_enabled = false; - } - return result; -} - -int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PkgPwrSetLimit, n); - return 0; -} - -static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) -{ - return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); -} - -int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int smc_result; - int result = 0; - - data->power_containment_features = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - - if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable TDCLimit in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_TDCLimit; - } - - if (data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable PkgPwrTracking in SMC.", result = -1;); - if (0 == smc_result) { - struct phm_cac_tdp_table *cac_table = - table_info->cac_dtp_table; - uint32_t default_limit = - (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_PkgPwrLimit; - - if (polaris10_set_power_limit(hwmgr, default_limit)) - printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); - } - } - } - return result; -} - -int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment) && - 
data->power_containment_features) { - int smc_result; - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_TDCLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable TDCLimit in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_DTE) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableDTE)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable DTE in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable PkgPwrTracking in SMC.", - result = smc_result); - } - data->power_containment_features = 0; - } - - return result; -} - -int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - int adjust_percent, target_tdp; - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - /* adjustment percentage has already been validated */ - adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? - hwmgr->platform_descriptor.TDPAdjustment : - (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; - result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); - } - - return result; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h deleted file mode 100644 index 329119d6cc71..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef POLARIS10_POWERTUNE_H -#define POLARIS10_POWERTUNE_H - -enum polaris10_pt_config_reg_type { - POLARIS10_CONFIGREG_MMR = 0, - POLARIS10_CONFIGREG_SMC_IND, - POLARIS10_CONFIGREG_DIDT_IND, - POLARIS10_CONFIGREG_GC_CAC_IND, - POLARIS10_CONFIGREG_CACHE, - POLARIS10_CONFIGREG_MAX -}; - -#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 -#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 -#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000 -#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12 -#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000 -#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12 -#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 -#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e -#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 -#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e -#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 -#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 - -#define ixGC_CAC_CNTL 0x0000 -#define ixDIDT_SQ_STALL_CTRL 0x0004 -#define ixDIDT_SQ_TUNING_CTRL 0x0005 -#define ixDIDT_TD_STALL_CTRL 0x0044 -#define ixDIDT_TD_TUNING_CTRL 0x0045 -#define ixDIDT_TCP_STALL_CTRL 0x0064 -#define ixDIDT_TCP_TUNING_CTRL 0x0065 - -struct polaris10_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum polaris10_pt_config_reg_type type; -}; - - -void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); -int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); -int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); -int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); -int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); -int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); -int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); -int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); -int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); -int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr); -#endif /* POLARIS10_POWERTUNE_H */ - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c deleted file mode 100644 index 41f835adba91..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c +++ /dev/null @@ -1,716 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include "polaris10_thermal.h" -#include "polaris10_hwmgr.h" -#include "polaris10_smumgr.h" -#include "polaris10_ppsmc.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" - -int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, - struct phm_fan_speed_info *fan_speed_info) -{ - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - fan_speed_info->supports_percent_read = true; - fan_speed_info->supports_percent_write = true; - fan_speed_info->min_percent = 0; - fan_speed_info->max_percent = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - fan_speed_info->supports_rpm_read = true; - fan_speed_info->supports_rpm_write = true; - fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; - fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; - } else { - fan_speed_info->min_rpm = 0; - fan_speed_info->max_rpm = 0; - } - - return 0; -} - -int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, - uint32_t *speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_STATUS, FDO_PWM_DUTY); - - if (duty100 == 0) - return -EINVAL; - - - tmp64 = (uint64_t)duty * 100; - do_div(tmp64, duty100); - *speed = (uint32_t)tmp64; - - if (*speed > 100) - *speed = 100; - - return 0; -} - -int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - uint32_t tach_period; - uint32_t crystal_clock_freq; - - if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0)) - return 0; - - tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD); - - if (tach_period == 0) - return -EINVAL; - - crystal_clock_freq = tonga_get_xclk(hwmgr); - - *speed = 60 * crystal_clock_freq * 10000 / tach_period; - - return 0; -} - -/** -* Set Fan Speed Control to static mode, so that the user can decide what speed to use. -* @param hwmgr the address of the powerplay hardware manager. -* mode the fan control mode, 0 default, 1 by percent, 5, by RPM -* @exception Should always succeed. -*/ -int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - - if (hwmgr->fan_ctrl_is_in_default_mode) { - hwmgr->fan_ctrl_default_mode = - PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); - hwmgr->tmin = - PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN); - hwmgr->fan_ctrl_is_in_default_mode = false; - } - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE, mode); - - return 0; -} - -/** -* Reset Fan Speed Control to default mode. -* @param hwmgr the address of the powerplay hardware manager. 
-* @exception Should always succeed. -*/ -int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->fan_ctrl_is_in_default_mode) { - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN, hwmgr->tmin); - hwmgr->fan_ctrl_is_in_default_mode = true; - } - - return 0; -} - -static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - int result; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport)) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM)) - hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM); - else - hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr, - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM); - - } else { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - } - - if (!result && hwmgr->thermal_controller. - advanceFanControlParameters.ucTargetTemperature) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanTemperatureTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ucTargetTemperature); - - return result; -} - - -int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); -} - -/** -* Set Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (0% - 100%) to be set. -* @exception Fails is the 100% setting appears to be 0. -*/ -int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, - uint32_t speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (speed > 100) - speed = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - polaris10_fan_ctrl_stop_smc_fan_control(hwmgr); - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - - return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reset Fan Speed to default. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Always succeeds. -*/ -int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) -{ - int result; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - if (!result) - result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = polaris10_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. 
-* @param speed is the percentage value (min - max) to be set. -* @exception Fails is the speed not lie between min and max. -*/ -int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - uint32_t tach_period; - uint32_t crystal_clock_freq; - - if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0) || - (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || - (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - polaris10_fan_ctrl_stop_smc_fan_control(hwmgr); - - crystal_clock_freq = tonga_get_xclk(hwmgr); - - tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD, tach_period); - - return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reads the remote temperature from the SIslands thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr) -{ - int temp; - - temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_STATUS, CTF_TEMP); - - /* Bit 9 means the reading is lower than the lowest usable value. */ - if (temp & 0x200) - temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING; - else - temp = temp & 0x1ff; - - temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return temp; -} - -/** -* Set the requested temperature range for high and low alert signals -* -* @param hwmgr The address of the hardware manager. -* @param range Temperature range to be programmed for high and low alert signals -* @exception PP_Result_BadInput if the input data is not valid. -*/ -static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - uint32_t low_temp, uint32_t high_temp) -{ - uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - if (low < low_temp) - low = low_temp; - if (high > high_temp) - high = high_temp; - - if (low > high) - return -EINVAL; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, DIG_THERM_INTH, - (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, DIG_THERM_INTL, - (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_CTRL, DIG_THERM_DPM, - (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - - return 0; -} - -/** -* Programs thermal controller one-time setting registers -* -* @param hwmgr The address of the hardware manager. -*/ -static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_CTRL, EDGE_PER_REV, - hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution - 1); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); - - return 0; -} - -/** -* Enable thermal alerts on the RV770 thermal controller. -* -* @param hwmgr The address of the hardware manager. 
-*/ -static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to enable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); -} - -/** -* Disable thermal alerts on the RV770 thermal controller. -* @param hwmgr The address of the hardware manager. -*/ -static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK); - alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to disable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); -} - -/** -* Uninitialize the thermal controller. -* Currently just disables alerts. -* @param hwmgr The address of the hardware manager. -*/ -int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) -{ - int result = polaris10_thermal_disable_alert(hwmgr); - - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - polaris10_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (data->fan_table_start == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
- usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(hwmgr-> - thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = tonga_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> - thermal_controller.advanceFanControlParameters.ulCycleDelay * - reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( - hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_CTRL, TEMP_SEL); - - res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, - (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), - data->sram_end); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanMinPwm, - hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanSclkTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); - - if (res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - - return 0; -} - -/** -* Start the fan control on the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ -/* If the fantable setup has failed we could have disabled - * PHM_PlatformCaps_MicrocodeFanControl even after - * this function was included in the table. - * Make sure that we still think controlling the fan is OK. 
-*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - polaris10_fan_ctrl_start_smc_fan_control(hwmgr); - polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - } - - return 0; -} - -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; - - if (range == NULL) - return -EINVAL; - - return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return polaris10_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return polaris10_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return polaris10_thermal_disable_alert(hwmgr); -} - -static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) - return 0; - - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); - - ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? 
- 0 : -1; - - if (!ret) - /* If this param is not changed, this function could fire unnecessarily */ - smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY; - - return ret; -} - -static const struct phm_master_table_item -polaris10_thermal_start_thermal_controller_master_list[] = { - {NULL, tf_polaris10_thermal_initialize}, - {NULL, tf_polaris10_thermal_set_temperature_range}, - {NULL, tf_polaris10_thermal_enable_alert}, - {NULL, tf_polaris10_thermal_avfs_enable}, -/* We should restrict performance levels to low before we halt the SMC. - * On the other hand we are still in boot state when we do this - * so it would be pointless. - * If this assumption changes we have to revisit this table. - */ - {NULL, tf_polaris10_thermal_setup_fan_table}, - {NULL, tf_polaris10_thermal_start_smc_fan_control}, - {NULL, NULL} -}; - -static const struct phm_master_table_header -polaris10_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - polaris10_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item -polaris10_thermal_set_temperature_range_master_list[] = { - {NULL, tf_polaris10_thermal_disable_alert}, - {NULL, tf_polaris10_thermal_set_temperature_range}, - {NULL, tf_polaris10_thermal_enable_alert}, - {NULL, NULL} -}; - -static const struct phm_master_table_header -polaris10_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - polaris10_thermal_set_temperature_range_master_list -}; - -int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - polaris10_fan_ctrl_set_default_mode(hwmgr); - return 0; -} - -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &polaris10_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &polaris10_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h deleted file mode 100644 index 62f8cbc2d590..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _POLARIS10_THERMAL_H_ -#define _POLARIS10_THERMAL_H_ - -#include "hwmgr.h" - -#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1 -#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2 - -#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256 -#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255 - -#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0 -#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); - -extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); -extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c deleted file mode 100644 index e58d038a997b..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "hwmgr.h" -#include "tonga_clockpowergating.h" -#include "tonga_ppsmc.h" -#include "tonga_hwmgr.h" - -int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_uvd_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_UVDPowerOFF); - return 0; -} - -int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_uvd_power_gating(hwmgr)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDynamicPowerGating)) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDPowerON, 1); - } else { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDPowerON, 0); - } - } - - return 0; -} - -int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_VCEPowerOFF); - return 0; -} - -int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_VCEPowerON); - return 0; -} - -int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating) -{ - int ret = 0; - - switch (block) { - case PHM_AsicBlock_UVD_MVC: - case PHM_AsicBlock_UVD: - case PHM_AsicBlock_UVD_HD: - case PHM_AsicBlock_UVD_SD: - if (gating == PHM_ClockGateSetting_StaticOff) - ret = tonga_phm_powerdown_uvd(hwmgr); - else - ret = tonga_phm_powerup_uvd(hwmgr); - break; - case PHM_AsicBlock_GFX: - default: - break; - } - - return ret; -} - -int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - - tonga_phm_powerup_uvd(hwmgr); - tonga_phm_powerup_vce(hwmgr); - - return 0; -} - -int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->uvd_power_gated == bgate) - return 0; - - data->uvd_power_gated = bgate; - - if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - tonga_update_uvd_dpm(hwmgr, true); - tonga_phm_powerdown_uvd(hwmgr); - } else { - tonga_phm_powerup_uvd(hwmgr); - cgs_set_powergating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - - tonga_update_uvd_dpm(hwmgr, false); - } - - return 0; -} - -int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_set_power_state_input states; - const struct pp_power_state *pcurrent; - struct pp_power_state *requested; - - pcurrent = hwmgr->current_ps; - requested = hwmgr->request_ps; - - states.pcurrent_state = &(pcurrent->hardware); - states.pnew_state = 
&(requested->hardware); - - if (phm_cf_want_vce_power_gating(hwmgr)) { - if (data->vce_power_gated != bgate) { - if (bgate) { - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - cgs_set_powergating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - tonga_enable_disable_vce_dpm(hwmgr, false); - data->vce_power_gated = true; - } else { - tonga_phm_powerup_vce(hwmgr); - data->vce_power_gated = false; - cgs_set_powergating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - - tonga_update_vce_dpm(hwmgr, &states); - tonga_enable_disable_vce_dpm(hwmgr, true); - return 0; - } - } - } else { - tonga_update_vce_dpm(hwmgr, &states); - tonga_enable_disable_vce_dpm(hwmgr, true); - return 0; - } - - if (!data->vce_power_gated) - tonga_update_vce_dpm(hwmgr, &states); - - return 0; -} - -int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, - const uint32_t *msg_id) -{ - PPSMC_Msg msg; - uint32_t value; - - switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { - case PP_GROUP_GFX: - switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { - case PP_BLOCK_GFX_CG: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_GFX_MG: - /* For GFX MGCG, there are three different ones; - * CPF, RLC, and all others. CPF MGCG will not be used for Tonga. - * For GFX MGLS, Tonga will not support it. - * */ - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK); - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - default: - return -1; - } - break; - - case PP_GROUP_SYS: - switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { - case PP_BLOCK_SYS_BIF: - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_BIF_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_MC: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_MC_MGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_MC_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - - } - break; - - case PP_BLOCK_SYS_HDP: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? 
PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_HDP_MGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - - value = CG_SYS_HDP_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_SDMA: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_SDMA_MGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - - value = CG_SYS_SDMA_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_ROM: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_ROM_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - default: - return -1; - - } - break; - - default: - return -1; - - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h deleted file mode 100644 index 8bc38cb17b7f..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef _TONGA_CLOCK_POWER_GATING_H_ -#define _TONGA_CLOCK_POWER_GATING_H_ - -#include "tonga_hwmgr.h" -#include "pp_asicblocks.h" - -extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); -extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); -extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); -#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h deleted file mode 100644 index 080d69d77f04..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef TONGA_DYN_DEFAULTS_H -#define TONGA_DYN_DEFAULTS_H - - -/** \file - * Volcanic Islands Dynamic default parameters. - */ - -enum TONGAdpm_TrendDetection { - TONGAdpm_TrendDetection_AUTO, - TONGAdpm_TrendDetection_UP, - TONGAdpm_TrendDetection_DOWN -}; -typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection; - -/* Bit vector representing same fields as hardware register. */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */ -/* HDP_busy */ -/* IH_busy */ -/* DRM_busy */ -/* DRMDMA_busy */ -/* UVD_busy */ -/* VCE_busy */ -/* ACP_busy */ -/* SAMU_busy */ -/* AVP_busy */ -/* SDMA enabled */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */ -/* SH_Gfx_busy */ -/* RB_Gfx_busy */ -/* VCE_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */ -/* FE_Gfx_busy */ -/* RB_Gfx_busy */ -/* ACP_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. 
*/ -/* FE_Gfx_busy */ -/* SH_Gfx_busy */ -/* UVD_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */ -/* VCE_busy */ -/* ACP_busy */ -/* SAMU_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */ - - -/* thermal protection counter (units).*/ -#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */ - -/* static screen threshold unit */ -#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0 - -/* static screen threshold */ -#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8 - -/* gfx idle clock stop threshold */ -#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */ - -/* Fixed reference divider to use when building baby stepping tables. */ -#define PPTONGA_REFERENCEDIVIDER_DFLT 4 - -/* - * ULV voltage change delay time - * Used to be delay_vreg in N.I. split for S.I. - * Using N.I. delay_vreg value as default - * ReferenceClock = 2700 - * VoltageResponseTime = 1000 - * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687 - */ - -#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035 -#define PPTONGA_CGULVCONTROL_DFLT 0x00007450 -#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */ -#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */ - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c deleted file mode 100644 index 3110bf0eeacc..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ /dev/null @@ -1,6371 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/fb.h> -#include "linux/delay.h" -#include "pp_acpi.h" -#include "hwmgr.h" -#include <atombios.h> -#include "tonga_hwmgr.h" -#include "pptable.h" -#include "processpptables.h" -#include "process_pptables_v1_0.h" -#include "pptable_v1_0.h" -#include "pp_debug.h" -#include "tonga_ppsmc.h" -#include "cgs_common.h" -#include "pppcielanes.h" -#include "tonga_dyn_defaults.h" -#include "smumgr.h" -#include "tonga_smumgr.h" -#include "tonga_clockpowergating.h" -#include "tonga_thermal.h" - -#include "smu/smu_7_1_2_d.h" -#include "smu/smu_7_1_2_sh_mask.h" - -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" - -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" - -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" - -#include "cgs_linux.h" -#include "eventmgr.h" -#include "amd_pcie_helpers.h" - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define MC_CG_SEQ_DRAMCONF_S0 0x05 -#define MC_CG_SEQ_DRAMCONF_S1 0x06 -#define MC_CG_SEQ_YCLK_SUSPEND 0x04 -#define MC_CG_SEQ_YCLK_RESUME 0x0a - -#define PCIE_BUS_CLK 10000 -#define TCLK (PCIE_BUS_CLK / 10) - -#define SMC_RAM_END 0x40000 -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -#define VDDC_VDDCI_DELTA 200 -#define VDDC_VDDGFX_DELTA 300 - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - -typedef uint32_t PECI_RegistryValue; - -/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */ -static const uint16_t PP_ClockStretcherLookupTable[2][4] = { - {600, 1050, 3, 0}, - {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */ -static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = { - { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */ -static const uint8_t PP_ClockStretchAmountConversion[2][6] = { - {0, 1, 3, 2, 4, 5}, - {0, 2, 4, 5, 6, 5} }; - -/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field.
*/ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ - DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ - DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ -}; -typedef enum DPM_EVENT_SRC DPM_EVENT_SRC; - -static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic); - -struct tonga_power_state *cast_phw_tonga_power_state( - struct pp_hw_power_state *hw_ps) -{ - if (hw_ps == NULL) - return NULL; - - PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (struct tonga_power_state *)hw_ps; -} - -const struct tonga_power_state *cast_const_phw_tonga_power_state( - const struct pp_hw_power_state *hw_ps) -{ - if (hw_ps == NULL) - return NULL; - - PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (const struct tonga_power_state *)hw_ps; -} - -int tonga_add_voltage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *look_up_table, - phm_ppt_v1_voltage_lookup_record *record) -{ - uint32_t i; - PP_ASSERT_WITH_CODE((NULL != look_up_table), - "Lookup Table empty.", return -1;); - PP_ASSERT_WITH_CODE((0 != look_up_table->count), - "Lookup Table empty.", return -1;); - PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count), - "Lookup Table is full.", return -1;); - - /* This is to avoid entering duplicate calculated records. */ - for (i = 0; i < look_up_table->count; i++) { - if (look_up_table->entries[i].us_vdd == record->us_vdd) { - if (look_up_table->entries[i].us_calculated == 1) - return 0; - else - break; - } - } - - look_up_table->entries[i].us_calculated = 1; - look_up_table->entries[i].us_vdd = record->us_vdd; - look_up_table->entries[i].us_cac_low = record->us_cac_low; - look_up_table->entries[i].us_cac_mid = record->us_cac_mid; - look_up_table->entries[i].us_cac_high = record->us_cac_high; - /* Only increment the count when we're appending, not replacing duplicate entry. */ - if (i == look_up_table->count) - look_up_table->count++; - - return 0; -} - -int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) -{ - PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 
0 : -1; -} - -uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, - uint32_t voltage) -{ - uint8_t count = (uint8_t) (voltage_table->count); - uint8_t i = 0; - - PP_ASSERT_WITH_CODE((NULL != voltage_table), - "Voltage Table empty.", return 0;); - PP_ASSERT_WITH_CODE((0 != count), - "Voltage Table empty.", return 0;); - - for (i = 0; i < count; i++) { - /* find first voltage bigger than requested */ - if (voltage_table->entries[i].value >= voltage) - return i; - } - - /* voltage is bigger than max voltage in the table */ - return i - 1; -} - - -/** - * @brief PhwTonga_GetVoltageOrder - * Returns index of requested voltage record in lookup(table) - * @param hwmgr - pointer to hardware manager - * @param lookupTable - lookup list to search in - * @param voltage - voltage to look for - * @return 0 on success - */ -uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, - uint16_t voltage) -{ - uint8_t count = (uint8_t) (look_up_table->count); - uint8_t i; - - PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); - PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); - - for (i = 0; i < count; i++) { - /* find first voltage equal or bigger than requested */ - if (look_up_table->entries[i].us_vdd >= voltage) - return i; - } - - /* voltage is bigger than max voltage in the table */ - return i-1; -} - -static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - /* - * We return the status of Voltage Control instead of checking SCLK/MCLK DPM - * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, - * whereas voltage control is a fundemental change that will not be disabled - */ - - return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0); -} - -/** - * Re-generate the DPM level mask value - * @param hwmgr the address of the hardware manager - */ -static uint32_t tonga_get_dpm_level_enable_mask_value( - struct tonga_single_dpm_table * dpm_table) -{ - uint32_t i; - uint32_t mask_value = 0; - - for (i = dpm_table->count; i > 0; i--) { - mask_value = mask_value << 1; - - if (dpm_table->dpm_levels[i-1].enabled) - mask_value |= 0x1; - else - mask_value &= 0xFFFFFFFE; - } - return mask_value; -} - -/** - * Retrieve DPM default values from registry (if available) - * - * @param hwmgr the address of the powerplay hardware manager. 
- */ -void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - phw_tonga_ulv_parm *ulv = &(data->ulv); - uint32_t tmp; - - ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT; - data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7; - - data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT; - data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ABM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - tmp = 0; - if (tmp == 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicACTiming); - - tmp = 0; - if (0 != tmp) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMemoryTransition); - - tonga_initialize_power_tune_defaults(hwmgr); - - data->mclk_strobe_mode_threshold = 40000; - data->mclk_stutter_mode_threshold = 30000; - data->mclk_edc_enable_threshold = 40000; - data->mclk_edc_wr_enable_threshold = 40000; - - tmp = 0; - if (tmp != 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMCLS); - - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; - - tmp = 0; - - if (tmp) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); - -} - -static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = tonga_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end - ); - } - - return result; -} - -/** - * Find SCLK value that is associated with specified virtual_voltage_Id. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param virtual_voltage_Id voltageId to look for. - * @param sclk output value . 
- * @return always 0 if success and 2 if association not found - */ -static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - uint16_t virtual_voltage_id, uint32_t *sclk) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *pptable_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1); - - /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ - for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) { - voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd; - if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) - break; - } - - PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count, - "Can't find requested voltage id in vdd_dep_on_sclk table!", - return -1; - ); - - *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk; - - return 0; -} - -/** - * Get Leakage VDDC based on leakage ID. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return 2 if vddgfx returned is greater than 2V or if BIOS - */ -int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; - uint16_t virtual_voltage_id; - uint16_t vddc = 0; - uint16_t vddgfx = 0; - uint16_t i, j; - uint32_t sclk = 0; - - /* retrieve voltage for leakage ID (0xff01 + i) */ - for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) { - virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - - /* in split mode we should have only vddgfx EVV leakages */ - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - if (0 == tonga_get_sclk_for_voltage_evv(hwmgr, - pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - for (j = 1; j < sclk_table->count; j++) { - if (sclk_table->entries[j].clk == sclk && - sclk_table->entries[j].cks_enable == 0) { - sclk += 5000; - break; - } - } - } - if (0 == atomctrl_get_voltage_evv_on_sclk - (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, - virtual_voltage_id, &vddgfx)) { - /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */ - PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1); - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddgfx != 0 && vddgfx != virtual_voltage_id) { - data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; - data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id; - data->vddcgfx_leakage.count++; - } - } else { - printk("Error retrieving EVV voltage value!\n"); - } - } - } else { - /* in merged mode we have only vddc EVV leakages */ - if (0 == tonga_get_sclk_for_voltage_evv(hwmgr, - pptable_info->vddc_lookup_table, - virtual_voltage_id, &sclk)) { - if (0 == atomctrl_get_voltage_evv_on_sclk - (hwmgr, VOLTAGE_TYPE_VDDC, sclk, - virtual_voltage_id, &vddc)) { - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. 
*/ - PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1); - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddc != 0 && vddc != virtual_voltage_id) { - data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; - data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; - data->vddc_leakage.count++; - } - } else { - printk("Error retrieving EVV voltage value!\n"); - } - } - } - } - - return 0; -} - -int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* enable SCLK dpm */ - if (0 == data->sclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - } - - /* enable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, 0x100005);/*Read */ - - udelay(10); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, 0x500005);/* write */ - - } - - return 0; -} - -int tonga_start_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* enable general power management */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); - /* enable sclk deep sleep */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); - - /* prepare for PCIE DPM */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + - offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); - - if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) { - PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); - } - - /* enable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start Function!", - return -1 - ); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableACDCGPIOInterrupt); - } - - return 0; -} - -int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* disable SCLK dpm */ - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable SCLK DPM when DPM is disabled", - return -1 - ); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable)), - "Failed to disable SCLK DPM during DPM stop Function!", - return -1); - } - - /* disable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable MCLK DPM when DPM is disabled", - return -1 - ); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable)), - "Failed to Disable MCLK DPM during DPM stop Function!", - return -1); - } - - return 0; -} - -int tonga_stop_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0); - /* disable sclk deep sleep*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0); - - /* disable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable PCIE DPM when DPM is disabled", - return -1 - ); - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Disable)), - "Failed to disable pcie DPM during DPM stop Function!", - return -1); - } - - if (0 != tonga_disable_sclk_mclk_dpm(hwmgr)) - PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable Voltage CNTL when DPM is disabled", - return -1 - ); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Disable)), - "Failed to disable voltage DPM during DPM stop Function!", - return -1); - - return 0; -} - -int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); - - return 0; -} - -/** - * Send a message to the SMC and return a parameter - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: pointer to the received parameter - * @return The response that came from the SMC. - */ -PPSMC_Result tonga_send_msg_to_smc_return_parameter( - struct pp_hwmgr *hwmgr, - PPSMC_Msg msg, - uint32_t *parameter) -{ - int result; - - result = smum_send_msg_to_smc(hwmgr->smumgr, msg); - - if ((0 == result) && parameter) { - *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - } - - return result; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t level_mask = 1 << n; - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. 
*/ - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to force SCLK when DPM is disabled", - return -1;); - if (0 == data->sclk_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask), - level_mask) ? 0 : 1); - - return 0; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t level_mask = 1 << n; - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Force MCLK when DPM is disabled", - return -1;); - if (0 == data->mclk_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask), - level_mask) ? 0 : 1); - - return 0; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Force PCIE level when DPM is disabled", - return -1;); - if (0 == data->pcie_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel), - n) ? 0 : 1); - - return 0; -} - -/** - * Set the initial state by calling SMC to switch to this state directly - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_set_boot_state(struct pp_hwmgr *hwmgr) -{ - /* - * SMC only stores one state that SW will ask to switch too, - * so we switch the the just uploaded one - */ - return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1; -} - -/** - * Get the location of various tables inside the FW image. - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @return always 0 - */ -static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend); - - uint32_t tmp; - int result; - bool error = false; - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) { - data->dpm_table_start = tmp; - } - - error |= (0 != result); - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (0 == result) { - data->soft_regs_start = tmp; - tonga_smu->soft_regs_start = tmp; - } - - error |= (0 != result); - - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (0 == result) { - data->mc_reg_table_start = tmp; - } - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (0 == result) { - data->fan_table_start = tmp; - } - - error |= (0 != result); - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (0 == result) { - data->arb_table_start = tmp; - } - - error |= (0 != result); - - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, Version), - &tmp, data->sram_end); - - if (0 == result) { - hwmgr->microcode_version_info.SMC = tmp; - } - - error |= (0 != result); - - return error ? 1 : 0; -} - -/** - * Read clock related registers. - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @return always 0 - */ -int tonga_read_clock_registers(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - data->clock_registers.vCG_SPLL_FUNC_CNTL = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); - data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); - data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); - data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); - data->clock_registers.vDLL_CNTL = - cgs_read_register(hwmgr->device, mmDLL_CNTL); - data->clock_registers.vMCLK_PWRMGT_CNTL = - cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); - data->clock_registers.vMPLL_AD_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); - data->clock_registers.vMPLL_DQ_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); - data->clock_registers.vMPLL_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); - data->clock_registers.vMPLL_FUNC_CNTL_1 = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); - data->clock_registers.vMPLL_FUNC_CNTL_2 = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); - data->clock_registers.vMPLL_SS1 = - cgs_read_register(hwmgr->device, mmMPLL_SS1); - data->clock_registers.vMPLL_SS2 = - cgs_read_register(hwmgr->device, mmMPLL_SS2); - - return 0; -} - -/** - * Find out if memory is GDDR5. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_get_memory_type(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); - - data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); - - return 0; -} - -/** - * Enables Dynamic Power Management by SMC - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); - - return 0; -} - -/** - * Initialize PowerGating States for different engines - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - data->acp_power_gated = false; - data->pg_acp_init = true; - - return 0; -} - -/** - * Checks if DPM is enabled - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr) -{ - /* - * We return the status of Voltage Control instead of checking SCLK/MCLK DPM - * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, - * whereas voltage control is a fundemental change that will not be disabled - */ - return (!tonga_is_dpm_running(hwmgr) ? 
0 : 1); -} - -/** - * Checks if DPM is stopped - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (tonga_is_dpm_running(hwmgr)) { - /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */ - if (!data->dpm_table_start) { - return 1; - } - } - - return 0; -} - -/** - * Remove repeated voltage values and create table with unique values. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param voltage_table the pointer to changing voltage table - * @return 1 in success - */ - -static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, - pp_atomctrl_voltage_table *voltage_table) -{ - uint32_t table_size, i, j; - uint16_t vvalue; - bool bVoltageFound = false; - pp_atomctrl_voltage_table *table; - - PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;); - table_size = sizeof(pp_atomctrl_voltage_table); - table = kzalloc(table_size, GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - memset(table, 0x00, table_size); - table->mask_low = voltage_table->mask_low; - table->phase_delay = voltage_table->phase_delay; - - for (i = 0; i < voltage_table->count; i++) { - vvalue = voltage_table->entries[i].value; - bVoltageFound = false; - - for (j = 0; j < table->count; j++) { - if (vvalue == table->entries[j].value) { - bVoltageFound = true; - break; - } - } - - if (!bVoltageFound) { - table->entries[table->count].value = vvalue; - table->entries[table->count].smio_low = - voltage_table->entries[i].smio_low; - table->count++; - } - } - - memcpy(table, voltage_table, sizeof(pp_atomctrl_voltage_table)); - - kfree(table); - - return 0; -} - -static int tonga_get_svi2_vdd_ci_voltage_table( - struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table) -{ - uint32_t i; - int result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table); - - PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count), - "Voltage Dependency Table empty.", return -1;); - - vddci_voltage_table->mask_low = 0; - vddci_voltage_table->phase_delay = 0; - vddci_voltage_table->count = voltage_dependency_table->count; - - for (i = 0; i < voltage_dependency_table->count; i++) { - vddci_voltage_table->entries[i].value = - voltage_dependency_table->entries[i].vddci; - vddci_voltage_table->entries[i].smio_low = 0; - } - - result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to trim VDDCI table.", return result;); - - return 0; -} - - - -static int tonga_get_svi2_vdd_voltage_table( - struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *look_up_table, - pp_atomctrl_voltage_table *voltage_table) -{ - uint8_t i = 0; - - PP_ASSERT_WITH_CODE((0 != look_up_table->count), - "Voltage Lookup Table empty.", return -1;); - - voltage_table->mask_low = 0; - voltage_table->phase_delay = 0; - - voltage_table->count = look_up_table->count; - - for (i = 0; i < voltage_table->count; i++) { - voltage_table->entries[i].value = look_up_table->entries[i].us_vdd; - voltage_table->entries[i].smio_low = 0; - } - - return 0; -} - -/* - * -------------------------------------------------------- Voltage Tables -------------------------------------------------------------------------- - * If the voltage table would be bigger than what will fit into the 
state table on the SMC keep only the higher entries. - */ - -static void tonga_trim_voltage_table_to_fit_state_table( - struct pp_hwmgr *hwmgr, - uint32_t max_voltage_steps, - pp_atomctrl_voltage_table *voltage_table) -{ - unsigned int i, diff; - - if (voltage_table->count <= max_voltage_steps) { - return; - } - - diff = voltage_table->count - max_voltage_steps; - - for (i = 0; i < max_voltage_steps; i++) { - voltage_table->entries[i] = voltage_table->entries[i + diff]; - } - - voltage_table->count = max_voltage_steps; - - return; -} - -/** - * Create Voltage Tables. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result; - - /* MVDD has only GPIO voltage control */ - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve MVDD table.", return result;); - } - - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { - /* GPIO voltage */ - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", return result;); - } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - /* SVI2 voltage */ - result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr, - pptable_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;); - } - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { - /* VDDGFX has only SVI2 voltage control */ - result = tonga_get_svi2_vdd_voltage_table(hwmgr, - pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;); - } - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - /* VDDC has only SVI2 voltage control */ - result = tonga_get_svi2_vdd_voltage_table(hwmgr, - pptable_info->vddc_lookup_table, &(data->vddc_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from lookup table.", return result;); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); - ); - - PP_ASSERT_WITH_CODE( - (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)), - "Too many voltage values for VDDGFX. Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table)); - ); - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); - ); - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD. 
Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); - ); - - return 0; -} - -/** - * Vddc table preparation for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - unsigned int count; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - table->VddcLevelCount = data->vddc_voltage_table.count; - for (count = 0; count < table->VddcLevelCount; count++) { - table->VddcTable[count] = - PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE); - } - CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); - } - return 0; -} - -/** - * VddGfx table preparation for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - unsigned int count; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { - table->VddGfxLevelCount = data->vddgfx_voltage_table.count; - for (count = 0; count < data->vddgfx_voltage_table.count; count++) { - table->VddGfxTable[count] = - PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE); - } - CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); - } - return 0; -} - -/** - * Vddci table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. - * @return 0 - */ -static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t count; - - table->VddciLevelCount = data->vddci_voltage_table.count; - for (count = 0; count < table->VddciLevelCount; count++) { - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - table->VddciTable[count] = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); - } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { - table->SmioTable1.Pattern[count].Voltage = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); - /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */ - table->SmioTable1.Pattern[count].Smio = - (uint8_t) count; - table->Smio[count] |= - data->vddci_voltage_table.entries[count].smio_low; - table->VddciTable[count] = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); - } - } - - table->SmioMask1 = data->vddci_voltage_table.mask_low; - CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); - - return 0; -} - -/** - * Mvdd table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. 
- * @return 0 - */ -static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t count; - - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - table->MvddLevelCount = data->mvdd_voltage_table.count; - for (count = 0; count < table->MvddLevelCount; count++) { - table->SmioTable2.Pattern[count].Voltage = - PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); - /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ - table->SmioTable2.Pattern[count].Smio = - (uint8_t) count; - table->Smio[count] |= - data->mvdd_voltage_table.entries[count].smio_low; - } - table->SmioMask2 = data->mvdd_voltage_table.mask_low; - - CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); - } - - return 0; -} - -/** - * Preparation of vddc and vddgfx CAC tables for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - uint32_t count; - uint8_t index; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; - struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table; - - /* pTables is already swapped, so in order to use the value from it, we need to swap it back. */ - uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount); - uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount); - - for (count = 0; count < vddcLevelCount; count++) { - /* We are populating vddc CAC data to BapmVddc table in split and merged mode */ - index = tonga_get_voltage_index(vddc_lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddcVidLoSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); - table->BapmVddcVidHiSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); - table->BapmVddcVidHiSidd2[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); - } - - if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) { - /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */ - for (count = 0; count < vddgfxLevelCount; count++) { - index = tonga_get_voltage_index(vddgfx_lookup_table, - data->vddgfx_voltage_table.entries[count].value); - table->BapmVddGfxVidLoSidd[count] = - convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low); - table->BapmVddGfxVidHiSidd[count] = - convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid); - table->BapmVddGfxVidHiSidd2[count] = - convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high); - } - } else { - for (count = 0; count < vddcLevelCount; count++) { - index = tonga_get_voltage_index(vddc_lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddGfxVidLoSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); - table->BapmVddGfxVidHiSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); - table->BapmVddGfxVidHiSidd2[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); - } - } - - return 0; -} - - -/** - * Preparation of voltage tables for SMC. 
- * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ - -int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result; - - result = tonga_populate_smc_vddc_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDC voltage table to SMC", return -1); - - result = tonga_populate_smc_vdd_ci_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDCI voltage table to SMC", return -1); - - result = tonga_populate_smc_vdd_gfx_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDGFX voltage table to SMC", return -1); - - result = tonga_populate_smc_mvdd_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate MVDD voltage table to SMC", return -1); - - result = tonga_populate_cac_tables(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate CAC voltage tables to SMC", return -1); - - return 0; -} - -/** - * Populates the SMC VRConfig field in DPM table. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint16_t config; - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { - /* Splitted mode */ - config = VR_SVI2_PLANE_1; - table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_2; - table->VRConfig |= config; - } else { - printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n"); - } - } else { - /* Merged mode */ - config = VR_MERGED_WITH_VDDC; - table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_1; - table->VRConfig |= config; - } else { - printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode!
\n"); - } - } - - /* Set Vddci Voltage Controller */ - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - config = VR_SVI2_PLANE_2; /* only in merged mode */ - table->VRConfig |= (config<vdd_ci_control) { - config = VR_SMIO_PATTERN_1; - table->VRConfig |= (config<mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config<backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - /* clock - voltage dependency table is empty table */ - if (allowed_clock_voltage_table->count == 0) - return -1; - - for (i = 0; i < allowed_clock_voltage_table->count; i++) { - /* find first sclk bigger than request */ - if (allowed_clock_voltage_table->entries[i].clk >= clock) { - voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - allowed_clock_voltage_table->entries[i].vddgfx); - - voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table, - allowed_clock_voltage_table->entries[i].vddc); - - if (allowed_clock_voltage_table->entries[i].vddci) { - voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table, - allowed_clock_voltage_table->entries[i].vddci); - } else { - voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table, - allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta); - } - - if (allowed_clock_voltage_table->entries[i].mvdd) { - *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd; - } - - voltage->Phases = 1; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - allowed_clock_voltage_table->entries[i-1].vddgfx); - voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table, - allowed_clock_voltage_table->entries[i-1].vddc); - - if (allowed_clock_voltage_table->entries[i-1].vddci) { - voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table, - allowed_clock_voltage_table->entries[i-1].vddci); - } - if (allowed_clock_voltage_table->entries[i-1].mvdd) { - *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd; - } - - return 0; -} - -/** - * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_reset_to_default(struct pp_hwmgr *hwmgr) -{ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1; -} - -int tonga_populate_memory_timing_parameters( - struct pp_hwmgr *hwmgr, - uint32_t engine_clock, - uint32_t memory_clock, - struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs - ) -{ - uint32_t dramTiming; - uint32_t dramTiming2; - uint32_t burstTime; - int result; - - result = atomctrl_set_engine_dram_timings_rv770(hwmgr, - engine_clock, memory_clock); - - PP_ASSERT_WITH_CODE(result == 0, - "Error calling VBIOS to set DRAM_TIMING.", return result); - - dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - - arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); - arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); - arb_regs->McArbBurstTime = (uint8_t)burstTime; - - return 0; -} - -/** - * Setup parameters for the MC ARB. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - * This function is to be called from the SetPowerState table. 
- */ -int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - int result = 0; - SMU72_Discrete_MCArbDramTimingTable arb_regs; - uint32_t i, j; - - memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable)); - - for (i = 0; i < data->dpm_table.sclk_table.count; i++) { - for (j = 0; j < data->dpm_table.mclk_table.count; j++) { - result = tonga_populate_memory_timing_parameters - (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, - data->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - - if (0 != result) { - break; - } - } - } - - if (0 == result) { - result = tonga_copy_bytes_to_smc( - hwmgr->smumgr, - data->arb_table_start, - (uint8_t *)&arb_regs, - sizeof(SMU72_Discrete_MCArbDramTimingTable), - data->sram_end - ); - } - - return result; -} - -static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct tonga_dpm_table *dpm_table = &data->dpm_table; - uint32_t i; - - /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */ - for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = - (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = - 1; - table->LinkLevel[i].SPC = - (uint8_t)(data->pcie_spc_cap & 0xff); - table->LinkLevel[i].DownThreshold = - PP_HOST_TO_SMC_UL(5); - table->LinkLevel[i].UpThreshold = - PP_HOST_TO_SMC_UL(30); - } - - data->smc_state_table.LinkLevelCount = - (uint8_t)dpm_table->pcie_speed_table.count; - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); - - return 0; -} - -static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->UvdLevelCount = (uint8_t) (mm_table->count); - table->UvdBootLevel = 0; - - for (count = 0; count < table->UvdLevelCount; count++) { - table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; - table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; - table->UvdLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->UvdLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ? 
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->UvdLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->UvdLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].VclkFrequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Vclk clock", return result); - - table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; - - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].DclkFrequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Dclk clock", return result); - - table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); - //CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage); - } - - return result; - -} - -static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->VceLevelCount = (uint8_t) (mm_table->count); - table->VceBootLevel = 0; - - for (count = 0; count < table->VceLevelCount; count++) { - table->VceLevel[count].Frequency = - mm_table->entries[count].eclk; - table->VceLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->VceLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->VceLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->VceLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->VceLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for VCE engine clock", return result); - - table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); - } - - return result; -} - -static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->AcpLevelCount = (uint8_t) (mm_table->count); - table->AcpBootLevel = 0; - - for (count = 0; count < table->AcpLevelCount; count++) { - table->AcpLevel[count].Frequency = - pptable_info->mm_dep_table->entries[count].aclk; - table->AcpLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->AcpLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ? - tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->AcpLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->AcpLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->AcpLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for engine clock", return result); - - table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); - } - - return result; -} - -static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t) (mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].Frequency = - pptable_info->mm_dep_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->SamuLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->SamuLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->SamuLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - } - - return result; -} - -/** - * Populates the SMC MCLK structure using the provided memory clock - * - * @param hwmgr the address of the hardware manager - * @param memory_clock the memory clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -static int tonga_calculate_mclk_params( - struct pp_hwmgr *hwmgr, - uint32_t memory_clock, - SMU72_Discrete_MemoryLevel *mclk, - bool strobe_mode, - bool dllStateOn - ) -{ - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; - uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; - uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; - uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; - uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; - uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; - uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; - uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; - uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; - - pp_atomctrl_memory_clock_param mpll_param; - int result; - - result = atomctrl_get_memory_pll_dividers_si(hwmgr, - memory_clock, &mpll_param, strobe_mode); - PP_ASSERT_WITH_CODE(0 == result, - "Error retrieving Memory Clock Parameters from VBIOS.", return result); - - /* MPLL_FUNC_CNTL setup*/ - mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); - - /* MPLL_FUNC_CNTL_1 setup*/ - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); - - /* MPLL_AD_FUNC_CNTL setup*/ - mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, - MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); - - if (data->is_memory_GDDR5) { - /* MPLL_DQ_FUNC_CNTL setup*/ - mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, - MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); - mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, - MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { - /* - ************************************ - Fref = Reference Frequency - NF = Feedback divider ratio - NR = Reference divider ratio - Fnom = Nominal VCO output frequency = Fref * NF / NR - Fs = Spreading Rate - D = Percentage down-spread / 2 - Fint = Reference input frequency to PFD = Fref / NR - NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) - CLKS = NS - 1 = ISS_STEP_NUM[11:0] - NV = D * Fs / Fnom * 4 * 
((Fnom/Fref * NR) ^ 2) - CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] - ************************************* - */ - pp_atomctrl_internal_ss_info ss_info; - uint32_t freq_nom; - uint32_t tmp; - uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); - - /* for GDDR5 for all modes and DDR3 */ - if (1 == mpll_param.qdr) - freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); - else - freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); - - /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ - tmp = (freq_nom / reference_clock); - tmp = tmp * tmp; - - if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { - /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ - /* ss.Info.speed_spectrum_rate -- in unit of khz */ - /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ - /* = reference_clock * 5 / speed_spectrum_rate */ - uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; - - /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ - /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ - uint32_t clkv = - (uint32_t)((((131 * ss_info.speed_spectrum_percentage * - ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); - - mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); - mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); - } - } - - /* MCLK_PWRMGT_CNTL setup */ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); - - - /* Save the result data to outpupt memory level structure */ - mclk->MclkFrequency = memory_clock; - mclk->MpllFuncCntl = mpll_func_cntl; - mclk->MpllFuncCntl_1 = mpll_func_cntl_1; - mclk->MpllFuncCntl_2 = mpll_func_cntl_2; - mclk->MpllAdFuncCntl = mpll_ad_func_cntl; - mclk->MpllDqFuncCntl = mpll_dq_func_cntl; - mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; - mclk->DllCntl = dll_cntl; - mclk->MpllSs1 = mpll_ss1; - mclk->MpllSs2 = mpll_ss2; - - return 0; -} - -static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock, - bool strobe_mode) -{ - uint8_t mc_para_index; - - if (strobe_mode) { - if (memory_clock < 12500) { - mc_para_index = 0x00; - } else if (memory_clock > 47500) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); - } - } else { - if (memory_clock < 65000) { - mc_para_index = 0x00; - } else if (memory_clock > 135000) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); - } - } - - return mc_para_index; -} - -static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) -{ - uint8_t mc_para_index; - - if (memory_clock < 10000) { - mc_para_index = 0; - } else if (memory_clock >= 80000) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); - } - - return mc_para_index; -} - -static int tonga_populate_single_memory_level( - struct pp_hwmgr *hwmgr, - uint32_t memory_clock, - SMU72_Discrete_MemoryLevel *memory_level - ) -{ - uint32_t minMvdd = 0; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct 
phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - bool dllStateOn; - struct cgs_display_info info = {0}; - - - if (NULL != pptable_info->vdd_dep_on_mclk) { - result = tonga_get_dependecy_volt_by_clk(hwmgr, - pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); - } - - if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) { - memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value; - } else { - memory_level->MinMvdd = minMvdd; - } - memory_level->EnabledForThrottle = 1; - memory_level->EnabledForActivity = 0; - memory_level->UpHyst = 0; - memory_level->DownHyst = 100; - memory_level->VoltageDownHyst = 0; - - /* Indicates maximum activity level for this performance level.*/ - memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - memory_level->StutterEnable = 0; - memory_level->StrobeEnable = 0; - memory_level->EdcReadEnable = 0; - memory_level->EdcWriteEnable = 0; - memory_level->RttEnable = 0; - - /* default set to low watermark. Highest level will be set to high later.*/ - memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; - - if ((data->mclk_stutter_mode_threshold != 0) && - (memory_clock <= data->mclk_stutter_mode_threshold) && - (!data->is_uvd_enabled) - && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) - && (data->display_timing.num_existing_displays <= 2) - && (data->display_timing.num_existing_displays != 0)) - memory_level->StutterEnable = 1; - - /* decide strobe mode*/ - memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && - (memory_clock <= data->mclk_strobe_mode_threshold); - - /* decide EDC mode and memory clock ratio*/ - if (data->is_memory_GDDR5) { - memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock, - memory_level->StrobeEnable); - - if ((data->mclk_edc_enable_threshold != 0) && - (memory_clock > data->mclk_edc_enable_threshold)) { - memory_level->EdcReadEnable = 1; - } - - if ((data->mclk_edc_wr_enable_threshold != 0) && - (memory_clock > data->mclk_edc_wr_enable_threshold)) { - memory_level->EdcWriteEnable = 1; - } - - if (memory_level->StrobeEnable) { - if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >= - ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; - } else { - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; - } - - } else { - dllStateOn = data->dll_defaule_on; - } - } else { - memory_level->StrobeRatio = - tonga_get_ddr3_mclk_frequency_ratio(memory_clock); - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; - } - - result = tonga_calculate_mclk_params(hwmgr, - memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); - - if (0 == result) { - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd); - /* MCLK frequency in units of 10KHz*/ - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); - /* Indicates maximum activity level for this performance level.*/ - CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); - } - - return result; -} - -/** - * Populates the SMC MVDD structure using the provided memory clock. - * - * @param hwmgr the address of the hardware manager - * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. - * @param voltage the SMC VOLTAGE structure to be populated - */ -int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern) -{ - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i = 0; - - if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) { - /* find mvdd value which clock is more than request */ - for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) { - if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) { - /* Always round to higher voltage. 
*/ - smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value; - break; - } - } - - PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count, - "MVDD Voltage is outside the supported range.", return -1); - - } else { - return -1; - } - - return 0; -} - - -static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_clock_dividers_vi dividers; - SMIO_Pattern voltage_level; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; - uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; - uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; - - /* The ACPI state should not do DPM on DC (or ever).*/ - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage; - - /* assign zero for now*/ - table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); - - /* get the engine clock dividers for this clock value*/ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, - table->ACPILevel.SclkFrequency, &dividers); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", return result); - - /* divider ID for required SCLK*/ - table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; - table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - table->ACPILevel.DeepSleepDivId = 0; - - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); - spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, - CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); - - table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; - table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; - table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - - /* For various features to be enabled/disabled while this level is active.*/ - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); - /* SCLK frequency in units of 10KHz*/ - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); - - /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ - table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage; - - /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ - - if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level)) - table->MemoryACPILevel.MinMvdd = - PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); - else - table->MemoryACPILevel.MinMvdd = 0; 
- - /* Force reset on DLL*/ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); - - /* Disable DLL in ACPIState*/ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); - - /* Enable DLL bypass signal*/ - dll_cntl = PHM_SET_FIELD(dll_cntl, - DLL_CNTL, MRDCK0_BYPASS, 0); - dll_cntl = PHM_SET_FIELD(dll_cntl, - DLL_CNTL, MRDCK1_BYPASS, 0); - - table->MemoryACPILevel.DllCntl = - PP_HOST_TO_SMC_UL(dll_cntl); - table->MemoryACPILevel.MclkPwrmgtCntl = - PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); - table->MemoryACPILevel.MpllAdFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); - table->MemoryACPILevel.MpllDqFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); - table->MemoryACPILevel.MpllFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); - table->MemoryACPILevel.MpllFuncCntl_1 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); - table->MemoryACPILevel.MpllFuncCntl_2 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); - table->MemoryACPILevel.MpllSs1 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); - table->MemoryACPILevel.MpllSs2 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpHyst = 0; - table->MemoryACPILevel.DownHyst = 100; - table->MemoryACPILevel.VoltageDownHyst = 0; - /* Indicates maximum activity level for this performance level.*/ - table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); - - table->MemoryACPILevel.StutterEnable = 0; - table->MemoryACPILevel.StrobeEnable = 0; - table->MemoryACPILevel.EdcReadEnable = 0; - table->MemoryACPILevel.EdcWriteEnable = 0; - table->MemoryACPILevel.RttEnable = 0; - - return result; -} - -static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level) -{ - int result = 0; - uint32_t i; - - for (i = 0; i < table->count; i++) { - if (value == table->dpm_levels[i].value) { - *boot_level = i; - result = 0; - } - } - return result; -} - -static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */ - table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */ - - /* find boot level from dpm table*/ - result = tonga_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); - - if (0 != result) { - data->smc_state_table.GraphicsBootLevel = 0; - printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ - in dependency table. Using Graphics DPM level 0!"); - result = 0; - } - - result = tonga_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); - - if (0 != result) { - data->smc_state_table.MemoryBootLevel = 0; - printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ - in dependency table. 
Using Memory DPM level 0!"); - result = 0; - } - - table->BootVoltage.Vddc = - tonga_get_voltage_id(&(data->vddc_voltage_table), - data->vbios_boot_state.vddc_bootup_value); - table->BootVoltage.VddGfx = - tonga_get_voltage_id(&(data->vddgfx_voltage_table), - data->vbios_boot_state.vddgfx_bootup_value); - table->BootVoltage.Vddci = - tonga_get_voltage_id(&(data->vddci_voltage_table), - data->vbios_boot_state.vddci_bootup_value); - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; - - CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); - - return result; -} - - -/** - * Calculates the SCLK dividers using the provided engine clock - * - * @param hwmgr the address of the hardware manager - * @param engine_clock the engine clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr, - uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk) -{ - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_clock_dividers_vi dividers; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t reference_clock; - uint32_t reference_divider; - uint32_t fbdiv; - int result; - - /* get the engine clock dividers for this clock value*/ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", return result); - - /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ - reference_clock = atomctrl_get_reference_clock(hwmgr); - - reference_divider = 1 + dividers.uc_pll_ref_div; - - /* low 14 bits is fraction and high 12 bits is divider*/ - fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; - - /* SPLL_FUNC_CNTL setup*/ - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); - - /* SPLL_FUNC_CNTL_3 setup*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, - CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); - - /* set to use fractional accumulation*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, - CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { - pp_atomctrl_internal_ss_info ss_info; - - uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; - if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { - /* - * ss_info.speed_spectrum_percentage -- in unit of 0.01% - * ss_info.speed_spectrum_rate -- in unit of khz - */ - /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ - uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); - - /* clkv = 2 * D * fbdiv / NS */ - uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); - - cg_spll_spread_spectrum = - PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); - cg_spll_spread_spectrum = - PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); - cg_spll_spread_spectrum_2 = - 
PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); - } - } - - sclk->SclkFrequency = engine_clock; - sclk->CgSpllFuncCntl3 = spll_func_cntl_3; - sclk->CgSpllFuncCntl4 = spll_func_cntl_4; - sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; - sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; - sclk->SclkDid = (uint8_t)dividers.pll_post_divider; - - return 0; -} - -static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock, - uint32_t min_engine_clock_in_sr) -{ - uint32_t i, temp; - uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK); - - PP_ASSERT_WITH_CODE((engine_clock >= min), - "Engine clock can't satisfy stutter requirement!", return 0); - - for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) { - temp = engine_clock >> i; - - if(temp >= min || i == 0) - break; - } - return (uint8_t)i; -} - -/** - * Populates single SMC SCLK structure using the provided engine clock - * - * @param hwmgr the address of the hardware manager - * @param engine_clock the engine clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level) -{ - int result; - uint32_t threshold; - uint32_t mvdd; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); - - - /* populate graphics levels*/ - result = tonga_get_dependecy_volt_by_clk(hwmgr, - pptable_info->vdd_dep_on_sclk, engine_clock, - &graphic_level->MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for VDDC \ - engine clock dependency table", return result); - - /* SCLK frequency in units of 10KHz*/ - graphic_level->SclkFrequency = engine_clock; - - /* Indicates maximum activity level for this performance level. 50% for now*/ - graphic_level->ActivityLevel = sclk_activity_level_threshold; - - graphic_level->CcPwrDynRm = 0; - graphic_level->CcPwrDynRm1 = 0; - /* this level can be used if activity is high enough.*/ - graphic_level->EnabledForActivity = 0; - /* this level can be used for throttling.*/ - graphic_level->EnabledForThrottle = 1; - graphic_level->UpHyst = 0; - graphic_level->DownHyst = 0; - graphic_level->VoltageDownHyst = 0; - graphic_level->PowerThrottle = 0; - - threshold = engine_clock * data->fast_watermark_threshold / 100; -/* - *get the DAL clock. do it in funture. 
- PECI_GetMinClockSettings(hwmgr->peci, &minClocks); - data->display_timing.min_clock_insr = minClocks.engineClockInSR; -*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) - graphic_level->DeepSleepDivId = - tonga_get_sleep_divider_id_from_clock(engine_clock, - data->display_timing.min_clock_insr); - - /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ - graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - if (0 == result) { - /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ - /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/ - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); - } - - return result; -} - -/** - * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states - * - * @param hwmgr the address of the hardware manager - */ -static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct tonga_dpm_table *dpm_table = &data->dpm_table; - phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; - uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count; - int result = 0; - uint32_t level_array_adress = data->dpm_table_start + - offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); - uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) * - SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/ - SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; - uint32_t i, maxEntry; - uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; - PECI_RegistryValue reg_value; - memset(levels, 0x00, level_array_size); - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - result = tonga_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &(data->smc_state_table.GraphicsLevel[i])); - - if (0 != result) - return result; - - /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ - if (i > 1) - data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; - - if (0 == i) { - reg_value = 0; - if (reg_value != 0) - data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value; - } - - if (1 == i) { - reg_value = 0; - if (reg_value != 0) - data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value; - } - } - - /* Only enable level 0 for now. 
*/ - data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; - - /* set highest level watermark to high */ - if (dpm_table->sclk_table.count > 1) - data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - if (pcie_table != NULL) { - PP_ASSERT_WITH_CODE((pcie_entry_count >= 1), - "There must be 1 or more PCIE levels defined in PPTable.", return -1); - maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/ - for (i = 0; i < dpm_table->sclk_table.count; i++) { - data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = - (uint8_t) ((i < maxEntry) ? i : maxEntry); - } - } else { - if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask) - printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!"); - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1<<(highest_pcie_level_enabled+1))) != 0)) { - highest_pcie_level_enabled++; - } - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1<dpm_level_enable_mask.pcie_dpm_enable_mask & - (1<<(lowest_pcie_level_enabled+1+count))) == 0)) { - count++; - } - mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? - (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled; - - - /* set pcieDpmLevel to highest_pcie_level_enabled*/ - for (i = 2; i < dpm_table->sclk_table.count; i++) { - data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; - } - - /* set pcieDpmLevel to lowest_pcie_level_enabled*/ - data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled*/ - data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; - } - /* level count will send to smc once at init smc table and never change*/ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); - - if (0 != result) - return result; - - return 0; -} - -/** - * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states - * - * @param hwmgr the address of the hardware manager - */ - -static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct tonga_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel); - uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY; - SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel; - uint32_t i; - - memset(levels, 0x00, level_array_size); - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", return -1); - result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, - &(data->smc_state_table.MemoryLevel[i])); - if (0 != result) { - return result; - } - } - - /* Only enable level 0 for now.*/ - 
data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; - - /* - * in order to prevent MC activity from stutter mode to push DPM up. - * the UVD change complements this by putting the MCLK in a higher state - * by default such that we are not effected by up threshold or and MCLK DPM latency. - */ - data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - /* set highest level watermark to high*/ - data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; - - /* level count will send to smc once at init smc table and never change*/ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, - level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); - - if (0 != result) { - return result; - } - - return 0; -} - -struct TONGA_DLL_SPEED_SETTING { - uint16_t Min; /* Minimum Data Rate*/ - uint16_t Max; /* Maximum Data Rate*/ - uint32_t dll_speed; /* The desired DLL_SPEED setting*/ -}; - -static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -/* ---------------------------------------- ULV related functions ----------------------------------------------------*/ - - -static int tonga_reset_single_dpm_table( - struct pp_hwmgr *hwmgr, - struct tonga_single_dpm_table *dpm_table, - uint32_t count) -{ - uint32_t i; - if (!(count <= MAX_REGULAR_DPM_NUMBER)) - printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \ - table entries to exceed max number! \n"); - - dpm_table->count = count; - for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { - dpm_table->dpm_levels[i].enabled = false; - } - - return 0; -} - -static void tonga_setup_pcie_table_entry( - struct tonga_single_dpm_table *dpm_table, - uint32_t index, uint32_t pcie_gen, - uint32_t pcie_lanes) -{ - dpm_table->dpm_levels[index].value = pcie_gen; - dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = true; -} - -static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; - uint32_t i, maxEntry; - - if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK); - - if (pcie_table != NULL) { - /* - * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue). - * If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry. - */ - maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ? 
- SMU72_MAX_LEVELS_LINK : pcie_table->count; - for (i = 1; i < maxEntry; i++) { - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1, - get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - } - data->dpm_table.pcie_speed_table.count = maxEntry - 1; - } else { - /* Hardcode Pcie Table */ - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - data->dpm_table.pcie_speed_table.count = 6; - } - /* Populate last level for boot PCIE level, but do not increment count. */ - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, - data->dpm_table.pcie_speed_table.count, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - - return 0; - -} - -/* - * This function is to initalize all DPM state tables for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these state tables to the allowed range based - * on the power policy or external client requests, such as UVD request, etc. - */ -static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table = - pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table = - pptable_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, - "SCLK dependency table has to have is missing. This table is mandatory", return -1); - - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, - "VMCLK dependency table has to have is missing. 
This table is mandatory", return -1); - - /* clear the state table to reset everything to default */ - memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); - tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS); - tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY); - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */ - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/ - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/ - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/ - - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", return -1); - /* Initialize Sclk DPM table based on allow Sclk values*/ - data->dpm_table.sclk_table.count = 0; - - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != - allowed_vdd_sclk_table->entries[i].clk) { - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = - allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */ - data->dpm_table.sclk_table.count++; - } - } - - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", return -1); - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != - allowed_vdd_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = - allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */ - data->dpm_table.mclk_table.count++; - } - } - - /* setup PCIE gen speed levels*/ - tonga_setup_default_pcie_tables(hwmgr); - - /* save a copy of the default DPM table*/ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table)); - - return 0; -} - -int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr, - const struct tonga_power_state *bootState) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint8_t count, level; - - count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count); - for (level = 0; level < count; level++) { - if (pptable_info->vdd_dep_on_sclk->entries[level].clk >= - bootState->performance_levels[0].engine_clock) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count); - for (level = 0; level < count; level++) { - if (pptable_info->vdd_dep_on_mclk->entries[level].clk >= - bootState->performance_levels[0].memory_clock) { - data->smc_state_table.MemoryBootLevel = level; - break; - } - } - - return 0; -} - -/** - * Initializes the SMC table and uploads it - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @param pInput the pointer to input data (PowerState) - * @return always 0 - */ -static int tonga_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - SMU72_Discrete_DpmTable *table = &(data->smc_state_table); - const phw_tonga_ulv_parm *ulv = &(data->ulv); - uint8_t i; - PECI_RegistryValue reg_value; - pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - - result = tonga_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result;); - memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); - if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) { - tonga_populate_smc_voltage_tables(hwmgr, table); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - } - - if (data->is_memory_GDDR5) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - } - - i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN); - - if (i == 1 || i == 0) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL; - } - - if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) { - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result;); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); - } - - result = tonga_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result;); - - result = tonga_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result;); - - result = tonga_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Memory Level!", return result;); - - result = tonga_populate_smv_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result;); - - result = tonga_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result;); - - result = tonga_populate_smc_acp_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACP Level!", return result;); - - result = tonga_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result;); - - /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ - /* need to populate the ARB settings for the initial state. 
*/ - result = tonga_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result;); - - result = tonga_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result;); - - result = tonga_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot Level!", return result;); - - result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(result == 0, - "Failed to populate BAPM Parameters!", return result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - result = tonga_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate Clock Stretcher Data Table!", return result;); - } - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - pptable_info->cac_dtp_table->usTargetOperatingTemp * - TONGA_Q88_FORMAT_CONVERSION_UNIT; - table->TemperatureLimitLow = - (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) * - TONGA_Q88_FORMAT_CONVERSION_UNIT; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - - /* - * Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had) - * SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again - * but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay - * To avoid it, we set PCIeBootLinkLevel to highest dpm level - */ - PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), - "There must be 1 or more PCIE levels defined in PPTable.", - return -1); - - table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); - - table->PCIeGenInterval = 1; - - result = tonga_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate VRConfig setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - reg_value = 0; - if ((0 == reg_value) && - (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, - &gpio_pin_assignment))) { - table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } else { - table->VRHotGpio = TONGA_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - /* ACDC Switch GPIO */ - reg_value = 0; - if ((0 == reg_value) && - (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin_assignment))) { - table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = TONGA_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition); - - reg_value = 0; - if (1 == reg_value) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - 
PHM_PlatformCaps_AutomaticDCTransition); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition); - } - - reg_value = 0; - if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr, - THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - - table->ThermOutPolarity = - (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & - (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0; - - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; - - /* if required, combine VRHot/PCC with thermal out GPIO*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot) && - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CombinePCCWithThermalSignal)){ - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; - } - } else { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = 17; - table->ThermOutPolarity = 1; - table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; - } - - for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) { - table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); - } - CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); - CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); - CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); - CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); - CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); - - /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + - offsetof(SMU72_Discrete_DpmTable, SystemFlags), - (uint8_t *)&(table->SystemFlags), - sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController), - data->sram_end); - - PP_ASSERT_WITH_CODE(0 == result, - "Failed to upload dpm data to SMC memory!", return result;); - - return result; -} - -/* Look up the voltaged based on DAL's requested level. and then send the requested VDDC voltage to SMC*/ -static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr) -{ - return; -} - -int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - PPSMC_Result result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Apply minimum voltage based on DAL's request level */ - tonga_apply_dal_minimum_voltage_request(hwmgr); - - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (tonga_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); - - if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Sclk Dpm enable Mask failed", return -1); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ - if (tonga_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); - - if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Mclk Dpm enable Mask failed", return -1); - } - } - - return 0; -} - - -int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - uint32_t level, tmp; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->pcie_dpm_key_disabled) { - /* PCIE */ - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)), - "force highest pcie dpm state failed!", return -1); - } - } - } - - if (0 == data->sclk_dpm_key_disabled) { - /* SCLK */ - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)), - "force highest sclk dpm state failed!", return -1); - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ - Curr_Sclk_Index does not match the level \n"); - - } - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* MCLK */ - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)), - "force highest mclk dpm state failed!", return -1); - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ - Curr_Mclk_Index does not match the level \n"); - } - } - } - - return 0; -} - -/** - * Find the MC microcode version and store it in the HwMgr struct - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr) -{ - cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - - hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); - - return 0; -} - -/** - * Initialize Dynamic State Adjustment Rule Settings - * - * @param hwmgr the address of the powerplay hardware manager. 
- */ -int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) -{ - uint32_t table_size; - struct phm_clock_voltage_dependency_table *table_clk_vlt; - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - hwmgr->dyn_state.mclk_sclk_ratio = 4; - hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */ - hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */ - - /* initialize vddc_dep_on_dal_pwrl table */ - table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = kzalloc(table_size, GFP_KERNEL); - - if (NULL == table_clk_vlt) { - printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); - return -ENOMEM; - } else { - table_clk_vlt->count = 4; - table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; - table_clk_vlt->entries[0].v = 0; - table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; - table_clk_vlt->entries[1].v = 720; - table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; - table_clk_vlt->entries[2].v = 810; - table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; - table_clk_vlt->entries[3].v = 900; - pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; - } - - return 0; -} - -static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = - pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = - pptable_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, - "VDD dependency on SCLK table is missing. \ - This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, - "VDD dependency on SCLK table has to have is missing. \ - This table is mandatory", return -1); - - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, - "VDD dependency on MCLK table is missing. \ - This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
\ - This table is mandatory", return -1); - - data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc; - data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - - pptable_info->max_clock_voltage_on_ac.sclk = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; - pptable_info->max_clock_voltage_on_ac.mclk = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; - pptable_info->max_clock_voltage_on_ac.vddc = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - pptable_info->max_clock_voltage_on_ac.vddci = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = - pptable_info->max_clock_voltage_on_ac.sclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = - pptable_info->max_clock_voltage_on_ac.mclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = - pptable_info->max_clock_voltage_on_ac.vddc; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = - pptable_info->max_clock_voltage_on_ac.vddci; - - return 0; -} - -int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - int result = 1; - - PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr), - "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", - return result); - - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( - hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel)), - "unforce pcie level failed!", - return -1); - } - - result = tonga_upload_dpm_level_enable_mask(hwmgr); - - return result; -} - -static uint32_t tonga_get_lowest_enable_level( - struct pp_hwmgr *hwmgr, uint32_t level_mask) -{ - uint32_t level = 0; - - while (0 == (level_mask & (1 << level))) - level++; - - return level; -} - -static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - uint32_t level; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->pcie_dpm_key_disabled) { - /* PCIE */ - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { - level = tonga_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.pcie_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)), - "force lowest pcie dpm state failed!", return -1); - } - } - - if (0 == data->sclk_dpm_key_disabled) { - /* SCLK */ - if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = tonga_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)), - "force sclk dpm state failed!", return -1); - - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ - Curr_Sclk_Index does not match the level \n"); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* MCLK */ - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { - level = tonga_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)), - "force lowest mclk dpm state failed!", return -1); - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. 
\ - Curr_Mclk_Index does not match the level \n"); - } - } - - return 0; -} - -static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - uint8_t voltageId; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddgfx = - pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd; - } - } else { - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddc = - pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - voltageId = mclk_table->entries[entryId].vddInd; - mclk_table->entries[entryId].vddc = - pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mm_table->count; ++entryId) { - voltageId = mm_table->entries[entryId].vddcInd; - mm_table->entries[entryId].vddc = - pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - return 0; - -} - -static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - phm_ppt_v1_voltage_lookup_record v_record; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - if (sclk_table->entries[entryId].vdd_offset & (1 << 15)) - v_record.us_vdd = sclk_table->entries[entryId].vddgfx + - sclk_table->entries[entryId].vdd_offset - 0xFFFF; - else - v_record.us_vdd = sclk_table->entries[entryId].vddgfx + - sclk_table->entries[entryId].vdd_offset; - - sclk_table->entries[entryId].vddc = - v_record.us_cac_low = v_record.us_cac_mid = - v_record.us_cac_high = v_record.us_vdd; - - tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - if (mclk_table->entries[entryId].vdd_offset & (1 << 15)) - v_record.us_vdd = mclk_table->entries[entryId].vddc + - mclk_table->entries[entryId].vdd_offset - 0xFFFF; - else - v_record.us_vdd = mclk_table->entries[entryId].vddc + - mclk_table->entries[entryId].vdd_offset; - - mclk_table->entries[entryId].vddgfx = v_record.us_cac_low = - v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; - tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); - } - } - - return 0; - -} - -static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) -{ - uint32_t entryId; - phm_ppt_v1_voltage_lookup_record v_record; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information 
*)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - for (entryId = 0; entryId < mm_table->count; entryId++) { - if (mm_table->entries[entryId].vddgfx_offset & (1 << 15)) - v_record.us_vdd = mm_table->entries[entryId].vddc + - mm_table->entries[entryId].vddgfx_offset - 0xFFFF; - else - v_record.us_vdd = mm_table->entries[entryId].vddc + - mm_table->entries[entryId].vddgfx_offset; - - /* Add the calculated VDDGFX to the VDDGFX lookup table */ - mm_table->entries[entryId].vddgfx = v_record.us_cac_low = - v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; - tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); - } - } - return 0; -} - - -/** - * Change virtual leakage voltage to actual value. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pointer to changing voltage - * @param pointer to leakage table - */ -static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, - uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable) -{ - uint32_t leakage_index; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) { - /* if this voltage matches a leakage voltage ID */ - /* patch with actual leakage voltage */ - if (pLeakageTable->leakage_id[leakage_index] == *voltage) { - *voltage = pLeakageTable->actual_voltage[leakage_index]; - break; - } - } - - if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n"); -} - -/** - * Patch voltage lookup table by EVV leakages. - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @param pointer to voltage lookup table - * @param pointer to leakage table - * @return always 0 - */ -static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - phw_tonga_leakage_voltage *pLeakageTable) -{ - uint32_t i; - - for (i = 0; i < lookup_table->count; i++) { - tonga_patch_with_vdd_leakage(hwmgr, - &lookup_table->entries[i].us_vdd, pLeakageTable); - } - - return 0; -} - -static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr, - phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc) -{ - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable); - hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = - pptable_info->max_clock_voltage_on_dc.vddc; - - return 0; -} - -static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage( - struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable, - uint16_t *Vddgfx) -{ - tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable); - return 0; -} - -int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - uint32_t table_size, i, j; - phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; - table_size = lookup_table->count; - - PP_ASSERT_WITH_CODE(0 != lookup_table->count, - "Lookup table is empty", return -1); - - /* Sorting voltages */ - for (i = 0; i < table_size - 1; i++) { - for (j = i + 1; j > 0; j--) { - if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j-1]; - lookup_table->entries[j-1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; - } - } - } - - return 0; -} - -static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr) -{ - int result = 0; - int tmp_result; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr, - pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr, - &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx); - if (tmp_result != 0) - result = tmp_result; - } else { - tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr, - pptable_info->vddc_lookup_table, &(data->vddc_leakage)); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr, - &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc); - if (tmp_result != 0) - result = tmp_result; - } - - tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_calc_voltage_dependency_tables(hwmgr); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table); - if (tmp_result != 0) - result = tmp_result; - - return result; -} - -int 
tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - -int tonga_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = tonga_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = tonga_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = tonga_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = tonga_init_power_gate_state(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init power gate state!", result = tmp_result); - - tmp_result = tonga_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = tonga_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -/** - * Enable voltage control - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -/** - * Checks if we want to support voltage control - * - * @param hwmgr the address of the powerplay hardware manager. - */ -bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr) -{ - const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control); -} - -/*---------------------------MC----------------------------*/ - -uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) -{ - return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); -} - -bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) -{ - bool result = true; - - switch (inReg) { - case mmMC_SEQ_RAS_TIMING: - *outReg = mmMC_SEQ_RAS_TIMING_LP; - break; - - case mmMC_SEQ_DLL_STBY: - *outReg = mmMC_SEQ_DLL_STBY_LP; - break; - - case mmMC_SEQ_G5PDX_CMD0: - *outReg = mmMC_SEQ_G5PDX_CMD0_LP; - break; - - case mmMC_SEQ_G5PDX_CMD1: - *outReg = mmMC_SEQ_G5PDX_CMD1_LP; - break; - - case mmMC_SEQ_G5PDX_CTRL: - *outReg = mmMC_SEQ_G5PDX_CTRL_LP; - break; - - case mmMC_SEQ_CAS_TIMING: - *outReg = mmMC_SEQ_CAS_TIMING_LP; - break; - - case mmMC_SEQ_MISC_TIMING: - *outReg = mmMC_SEQ_MISC_TIMING_LP; - break; - - case mmMC_SEQ_MISC_TIMING2: - *outReg = mmMC_SEQ_MISC_TIMING2_LP; - break; - - case mmMC_SEQ_PMG_DVS_CMD: - *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; - break; - - case mmMC_SEQ_PMG_DVS_CTL: - *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; - break; - - case mmMC_SEQ_RD_CTL_D0: - *outReg = mmMC_SEQ_RD_CTL_D0_LP; - break; - - case mmMC_SEQ_RD_CTL_D1: - *outReg = mmMC_SEQ_RD_CTL_D1_LP; - break; - - case mmMC_SEQ_WR_CTL_D0: - *outReg = mmMC_SEQ_WR_CTL_D0_LP; - break; - - case mmMC_SEQ_WR_CTL_D1: - *outReg = mmMC_SEQ_WR_CTL_D1_LP; - break; - - case mmMC_PMG_CMD_EMRS: - *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; - break; - - case mmMC_PMG_CMD_MRS: - *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; - break; - - case mmMC_PMG_CMD_MRS1: - *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; - break; - - case mmMC_SEQ_PMG_TIMING: - *outReg = mmMC_SEQ_PMG_TIMING_LP; - 
break; - - case mmMC_PMG_CMD_MRS2: - *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; - break; - - case mmMC_SEQ_WR_CTL_2: - *outReg = mmMC_SEQ_WR_CTL_2_LP; - break; - - default: - result = false; - break; - } - - return result; -} - -int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table) -{ - uint32_t i; - uint16_t address; - - for (i = 0; i < table->last; i++) { - table->mc_reg_address[i].s0 = - tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) - ? address : table->mc_reg_address[i].s1; - } - return 0; -} - -int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table) -{ - uint8_t i, j; - - PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), - "Invalid VramInfo table.", return -1); - - for (i = 0; i < table->last; i++) { - ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; - } - ni_table->last = table->last; - - for (i = 0; i < table->num_entries; i++) { - ni_table->mc_reg_table_entry[i].mclk_max = - table->mc_reg_table_entry[i].mclk_max; - for (j = 0; j < table->last; j++) { - ni_table->mc_reg_table_entry[i].mc_data[j] = - table->mc_reg_table_entry[i].mc_data[j]; - } - } - - ni_table->num_entries = table->num_entries; - - return 0; -} - -/** - * VBIOS omits some information to reduce size, we need to recover them here. - * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. - * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] - * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. - * 3. need to set these data for each clock range - * - * @param hwmgr the address of the powerplay hardware manager. - * @param table the address of MCRegTable - * @return always 0 - */ -int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table) -{ - uint8_t i, j, k; - uint32_t temp_reg; - const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - for (i = 0, j = table->last; i < table->last; i++) { - PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - switch (table->mc_reg_address[i].s1) { - /* - * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. 
- * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] - */ - case mmMC_SEQ_MISC1: - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - ((temp_reg & 0xffff0000)) | - ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); - } - j++; - PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - - if (!data->is_memory_GDDR5) { - table->mc_reg_table_entry[k].mc_data[j] |= 0x100; - } - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - - if (!data->is_memory_GDDR5) { - table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; - table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - } - - break; - - case mmMC_SEQ_RESERVE_M: - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - break; - - default: - break; - } - - } - - table->last = j; - - return 0; -} - -int tonga_set_valid_flag(phw_tonga_mc_reg_table *table) -{ - uint8_t i, j; - for (i = 0; i < table->last; i++) { - for (j = 1; j < table->num_entries; j++) { - if (table->mc_reg_table_entry[j-1].mc_data[i] != - table->mc_reg_table_entry[j].mc_data[i]) { - table->validflag |= (1<<i); - break; - } - } - } - - return 0; -} - -int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - int result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_mc_reg_table *table; - phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table; - uint8_t module_index = tonga_get_memory_modile_index(hwmgr); - - table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - /* Program additional LP registers that are no longer programmed by VBIOS */ - cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); - - memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); - - result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); - - if (0 == result) - result = tonga_copy_vbios_smc_reg_table(table, ni_table); - - if (0 == result) { - tonga_set_s0_mc_reg_index(ni_table); - result = tonga_set_mc_special_registers(hwmgr, ni_table); - } - - if (0 == result) - tonga_set_valid_flag(ni_table); - - kfree(table); - return result; -} - -/* -* Copy one arb setting to another and then switch the active set. -* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants. 
-*/ -int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arbFreqSrc, uint32_t arbFreqDest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arbFreqSrc) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - - default: - return -1; - } - - switch (arbFreqDest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - - default: - return -1; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest); - - return 0; -} - -/** - * Initial switch from ARB F0->F1 - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - * This function is to be called from the SetPowerState table. - */ -int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr) -{ - return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -/** - * Initialize the ARB DRAM timing table's index field. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) -{ - const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t tmp; - int result; - - /* - * This is a read-modify-write on the first byte of the ARB table. - * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'. - * This solution is ugly, but we never write the whole table only individual fields in it. - * In reality this field should not be in that structure but in a soft register. 
- */ - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, &tmp, data->sram_end); - - if (0 != result) - return result; - - tmp &= 0x00FFFFFF; - tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - - return tonga_write_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, tmp, data->sram_end); -} - -int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) -{ - const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - uint32_t i, j; - - for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) { - if (data->tonga_mc_reg_table.validflag & 1<<j) { - PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE, - "Index of mc_reg_address[] array out of boundary", return -1); - mc_reg_table->address[i].s0 = - PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0); - mc_reg_table->address[i].s1 = - PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1); - i++; - } - } - - mc_reg_table->last = (uint8_t)i; - - return 0; -} - -/*convert register values from driver to SMC format */ -void tonga_convert_mc_registers( - const phw_tonga_mc_reg_entry * pEntry, - SMU72_Discrete_MCRegisterSet *pData, - uint32_t numEntries, uint32_t validflag) -{ - uint32_t i, j; - - for (i = 0, j = 0; j < numEntries; j++) { - if (validflag & 1<<j) { - pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]); - i++; - } - } -} - -/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */ -int tonga_convert_mc_reg_table_entry_to_smc( - struct pp_hwmgr *hwmgr, - const uint32_t memory_clock, - SMU72_Discrete_MCRegisterSet *mc_reg_table_data - ) -{ - const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t i = 0; - - for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) { - if (memory_clock <= - data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) { - break; - } - } - - if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0)) - --i; - - tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i], - mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag); - - return 0; -} - -int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, - SMU72_Discrete_MCRegisters *mc_reg_table) -{ - int result = 0; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - int res; - uint32_t i; - - for (i = 0; i < data->dpm_table.mclk_table.count; i++) { - res = tonga_convert_mc_reg_table_entry_to_smc( - hwmgr, - data->dpm_table.mclk_table.dpm_levels[i].value, - &mc_reg_table->data[i] - ); - - if (0 != res) - result = res; - } - - return result; -} - -int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters)); - result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize MCRegTable for the MC register addresses!", return result;); - - result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize MCRegTable for driver state!", return result;); - - return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, - (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end); -} - -/** - * Programs static screed detection parameters - * - * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0 - */ -int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** - * Setup display gap for glitch free memory clock switching. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - - display_gap = PHM_SET_FIELD(display_gap, - CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE); - - display_gap = PHM_SET_FIELD(display_gap, - CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - return 0; -} - -/** - * Programs activity state transition voting clients - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - -static void tonga_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) -{ - bool protection; - enum DPM_EVENT_SRC src; - - switch (sources) { - default: - printk(KERN_ERR "Unknown throttling event sources."); - /* fall through */ - case 0: - protection = false; - /* src is unused */ - break; - case (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL; - break; - case (1 << PHM_AutoThrottleSource_External): - protection = true; - src = DPM_EVENT_SRC_EXTERNAL; - break; - case (1 << PHM_AutoThrottleSource_External) | - (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; - break; - } - /* Order matters - don't enable thermal protection for the wrong source. 
*/ - if (protection) { - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, - DPM_EVENT_SRC, src); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, - !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)); - } else - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, 1); -} - -static int tonga_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!(data->active_auto_throttle_sources & (1 << source))) { - data->active_auto_throttle_sources |= 1 << source; - tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int tonga_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return tonga_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int tonga_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->active_auto_throttle_sources & (1 << source)) { - data->active_auto_throttle_sources &= ~(1 << source); - tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int tonga_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return tonga_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = tonga_check_for_dpm_stopped(hwmgr); - - if (cf_tonga_voltage_control(hwmgr)) { - tmp_result = tonga_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable voltage control!", result = tmp_result); - - tmp_result = tonga_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", result = tmp_result); - } - - tmp_result = tonga_initialize_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize MC reg table!", result = tmp_result); - - tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", result = tmp_result); - - tmp_result = tonga_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", result = tmp_result); - - tmp_result = tonga_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", result = tmp_result); - - tmp_result = tonga_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", result = tmp_result); - - tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", result = tmp_result); - - tmp_result = tonga_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", result = tmp_result); - - tmp_result = tonga_init_arb_table_index(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize ARB table index!", result = tmp_result); - - tmp_result = tonga_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to populate PM fuses!", result = tmp_result); - - tmp_result = tonga_populate_initial_mc_reg_table(hwmgr); - 
PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate initialize MC Reg table!", result = tmp_result); - - tmp_result = tonga_notify_smc_display_change(hwmgr, false); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify no display!", result = tmp_result); - - /* enable SCLK control */ - tmp_result = tonga_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", result = tmp_result); - - /* enable DPM */ - tmp_result = tonga_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", result = tmp_result); - - tmp_result = tonga_enable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to enable SMC CAC!", result = tmp_result); - - tmp_result = tonga_enable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to enable power containment!", result = tmp_result); - - tmp_result = tonga_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to power control set level!", result = tmp_result); - - tmp_result = tonga_enable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable thermal auto throttle!", result = tmp_result); - - return result; -} - -int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = tonga_check_for_dpm_running(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "SMC is still running!", return 0); - - tmp_result = tonga_disable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable thermal auto throttle!", result = tmp_result); - - tmp_result = tonga_stop_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to stop DPM!", result = tmp_result); - - tmp_result = tonga_reset_to_default(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to reset to default!", result = tmp_result); - - return result; -} - -int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr) -{ - int result; - - result = tonga_set_boot_state(hwmgr); - if (0 != result) - printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n"); - - return result; -} - -int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -/** - * Initializes the Volcanic Islands Hardware Manager - * - * @param hwmgr the address of the powerplay hardware manager. - * @return 1 if success; otherwise appropriate error code. 
- */ -int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - int result = 0; - SMU72_Discrete_DpmTable *table = NULL; - tonga_hwmgr *data; - pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phw_tonga_ulv_parm *ulv; - struct cgs_system_info sys_info = {0}; - - PP_ASSERT_WITH_CODE((NULL != hwmgr), - "Invalid Parameter!", return -1;); - - data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; - - data->dll_defaule_on = false; - data->sram_end = SMC_RAM_END; - - data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT; - - data->vddc_vddci_delta = VDDC_VDDCI_DELTA; - data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA; - data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableVoltageIsland); - - data->sclk_dpm_key_disabled = 0; - data->mclk_dpm_key_disabled = 0; - data->pcie_dpm_key_disabled = 0; - data->pcc_monitor_enabled = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - - data->gpio_debug = 0; - data->engine_clock_data = 0; - data->memory_clock_data = 0; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - /* need to set voltage control types before EVV patching*/ - data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE; - data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; - data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; - data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; - data->force_pcie_gen = PP_PCIEGenInvalid; - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { - data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDGFX)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { - data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; - } - } - - if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDGFX); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) { - data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO; - } - } - - if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; - } - - if 
(TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - if (pptable_info->cac_dtp_table->usClockStretchAmount != 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - - /* Initializes DPM default values*/ - tonga_initialize_dpm_defaults(hwmgr); - - /* Get leakage voltage based on leakage ID.*/ - PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)), - "Get EVV Voltage Failed. Abort Driver loading!", return -1); - - tonga_complete_dependency_tables(hwmgr); - - /* Parse pptable data read from VBIOS*/ - tonga_set_private_var_based_on_pptale(hwmgr); - - /* ULV Support*/ - ulv = &(data->ulv); - ulv->ulv_supported = false; - - /* Initalize Dynamic State Adjustment Rule Settings*/ - result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - if (result) - printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); - data->uvd_enabled = false; - - table = &(data->smc_state_table); - - /* - * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, - * Peak Current Control feature is enabled and we should program PCC HW register - */ - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { - uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); - - switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { - case 0: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); - break; - case 1: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); - break; - case 2: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); - break; - case 3: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); - break; - case 4: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); - break; - default: - printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \ - Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! 
\n"); - break; - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCNB_PWRMGT_CNTL, temp_reg); - } - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMU7); - - data->vddc_phase_shed_control = false; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (!result) { - if (sys_info.value & AMD_PG_SUPPORT_UVD) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - if (sys_info.value & AMD_PG_SUPPORT_VCE) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - } - - if (0 == result) { - data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - TONGA_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - } else { - /* Ignore return value in here, we are cleaning up a mess. 
*/ - tonga_hwmgr_backend_fini(hwmgr); - } - - return result; -} - -static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = tonga_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = tonga_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = tonga_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - return ret; -} - -static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *prequest_ps, - const struct pp_power_state *pcurrent_ps) -{ - struct tonga_power_state *tonga_ps = - cast_phw_tonga_power_state(&prequest_ps->hardware); - - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2, - "VI should always have 2 performance levels", - ); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < tonga_ps->performance_level_count; i++) { - if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk) - tonga_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk) - tonga_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; - tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; - - tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - - /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { - - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { - if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) { - stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - 
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = tonga_ps->performance_levels[0].engine_clock; - mclk = tonga_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock; - - tonga_ps->performance_levels[0].engine_clock = sclk; - tonga_ps->performance_levels[0].memory_clock = mclk; - - tonga_ps->performance_levels[1].engine_clock = - (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ? 
- tonga_ps->performance_levels[1].engine_clock : - tonga_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < tonga_ps->performance_levels[1].memory_clock) - mclk = tonga_ps->performance_levels[1].memory_clock; - - tonga_ps->performance_levels[0].memory_clock = mclk; - tonga_ps->performance_levels[1].memory_clock = mclk; - } else { - if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock) - tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { - for (i=0; i < tonga_ps->performance_level_count; i++) { - tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - - return 0; -} - -int tonga_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct tonga_power_state); -} - -static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - if (low) - return tonga_ps->performance_levels[0].memory_clock; - else - return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; -} - -static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - if (low) - return tonga_ps->performance_levels[0].engine_clock; - else - return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; -} - -static uint16_t tonga_get_current_pcie_speed( - struct pp_hwmgr *hwmgr) -{ - uint32_t speed_cntl = 0; - - speed_cntl = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speed_cntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int tonga_get_current_pcie_lane_number( - struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, - LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. 
*/ - data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr); - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)tonga_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - void *state, struct pp_power_state *power_state, - void *pp_table, uint32_t classification_flag) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - struct tonga_power_state *tonga_ps = - (struct tonga_power_state *)(&(power_state->hardware)); - - struct tonga_performance_level *performance_level; - - ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; - - ATOM_Tonga_POWERPLAYTABLE *powerplay_table = - (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (ATOM_Tonga_SCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); - - ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = - (ATOM_Tonga_MCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - - /* The following fields are not initialized here: id orderedList allStatesList */ - power_state->classification.ui_label = - (le16_to_cpu(state_entry->usClassification) & - ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> - ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; - power_state->classification.flags = classification_flag; - /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ - - power_state->classification.temporary_state = false; - power_state->classification.to_be_deleted = false; - - power_state->validation.disallowOnDC = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC)); - - power_state->pcie.lanes = 0; - - power_state->display.disableFrameModulation = false; - power_state->display.limitRefreshrate = false; - power_state->display.enableVariBright = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT)); - - power_state->validation.supportedPowerLevels = 0; - power_state->uvd_clocks.VCLK = 0; - power_state->uvd_clocks.DCLK = 0; - power_state->temperatures.min = 0; - power_state->temperatures.max = 0; - - performance_level = &(tonga_ps->performance_levels - [tonga_ps->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (tonga_ps->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - "Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged 
from low to high. */ - performance_level->memory_clock = - le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk); - - performance_level->engine_clock = - le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk); - - performance_level->pcie_gen = get_pcie_gen_support( - data->pcie_gen_cap, - state_entry->ucPCIEGenLow); - - performance_level->pcie_lane = get_pcie_lane_support( - data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - performance_level = - &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]); - - performance_level->memory_clock = - le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk); - - performance_level->engine_clock = - le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk); - - performance_level->pcie_gen = get_pcie_gen_support( - data->pcie_gen_cap, - state_entry->ucPCIEGenHigh); - - performance_level->pcie_lane = get_pcie_lane_support( - data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - return 0; -} - -static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *ps) -{ - int result; - struct tonga_power_state *tonga_ps; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - ps->hardware.magic = PhwTonga_Magic; - - tonga_ps = cast_phw_tonga_power_state(&(ps->hardware)); - - result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, ps, - tonga_get_pp_table_entry_callback_func); - - /* This is the earliest time we have all the dependency table and the VBIOS boot state - * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state - * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].vddci != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does not match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!ps->validation.disallowOnDC) - tonga_ps->dc_compatible = true; - - if (ps->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen; - else if (ps->classification.flags & PP_StateClassificationFlag_Boot) { - if (data->bacos.best_match == 0xffff) { - /* For V.I. 
use boot state as base BACO state */ - data->bacos.best_match = PP_StateClassificationFlag_Boot; - data->bacos.performance_level = tonga_ps->performance_levels[0]; - } - } - - tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK; - tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (ps->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - - for (i = 0; i < tonga_ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - tonga_ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_performance.min > - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - tonga_ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < tonga_ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - tonga_ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - tonga_ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static void -tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent; - uint32_t offset; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); - - offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); -} - -static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; - struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < psclk_table->count; i++) { - if (sclk == psclk_table->dpm_levels[i].value) - break; - } - - if (i >= psclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/ - if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i=0; i < pmclk_table->count; i++) { - if (mclk == pmclk_table->dpm_levels[i].value) - break; - } - - if (i >= pmclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps) -{ - uint32_t i; - uint32_t sclk, max_sclk = 0; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_dpm_table *pdpm_table = &data->dpm_table; - - for (i = 0; i < hw_ps->performance_level_count; i++) { - sclk = hw_ps->performance_levels[i].engine_clock; - if (max_sclk < sclk) - max_sclk = sclk; - } - - for (i = 0; i < pdpm_table->sclk_table.count; i++) { - if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk) - return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ? 
- pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value : - pdpm_table->pcie_speed_table.dpm_levels[i].value); - } - - return 0; -} - -static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state); - const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state); - - uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch(target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; - uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; - struct tonga_dpm_table *pdpm_table = &data->dpm_table; - - struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { - 
pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; - for (i = dpm_count; i > 1; i--) { - if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { - clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; - - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value + - (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; - - } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { - clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; - - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value - - (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; - } else - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; - for (i = dpm_count; i > 1; i--) { - if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { - clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; - - pdpm_table->mclk_table.dpm_levels[i].value = - pgolden_dpm_table->mclk_table.dpm_levels[i].value + - (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; - - } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; - - pdpm_table->mclk_table.dpm_levels[i].value = - pgolden_dpm_table->mclk_table.dpm_levels[i].value - - (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; - } else - pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = tonga_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = tonga_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct tonga_single_dpm_table * pdpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < pdpm_table->count; i++) { - if ((pdpm_table->dpm_levels[i].value < low_limit) || - (pdpm_table->dpm_levels[i].value > high_limit)) - pdpm_table->dpm_levels[i].enabled = false; - else - pdpm_table->dpm_levels[i].enabled = true; - } - return 0; -} - -static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == hw_state->performance_level_count) ? 
0: 1; - - tonga_trim_single_dpm_states(hwmgr, - &(data->dpm_table.sclk_table), - hw_state->performance_levels[0].engine_clock, - hw_state->performance_levels[high_limit_count].engine_clock); - - tonga_trim_single_dpm_states(hwmgr, - &(data->dpm_table.mclk_table), - hw_state->performance_levels[0].memory_clock, - hw_state->performance_levels[high_limit_count].memory_clock); - - return 0; -} - -static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) -{ - int result; - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - - result = tonga_trim_dpm_states(hwmgr, tonga_ps); - if (0 != result) - return result; - - data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); - data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); - data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - if (data->uvd_enabled) - data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - - data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); - - return 0; -} - -int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable : - (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable); -} - -int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable : - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable); -} - -int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0x00FFFFFF; - mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); - } - - return tonga_enable_disable_uvd_dpm(hwmgr, !bgate); -} - -int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state); - const struct tonga_power_state *tonga_cps = 
cast_const_phw_tonga_power_state(states->pcurrent_state); - - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) { - data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1); - - mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFF00FFFF; - mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.VceBootLevel)); - - tonga_enable_disable_vce_dpm(hwmgr, true); - } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0) - tonga_enable_disable_vce_dpm(hwmgr, false); - - return 0; -} - -static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - uint32_t address; - int32_t result; - - if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) - return 0; - - - memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters)); - - result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); - - if(result != 0) - return result; - - - address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); - - return tonga_copy_bytes_to_smc(hwmgr->smumgr, address, - (uint8_t *)&data->mc_reg_table.data[0], - sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, - data->sram_end); -} - -static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return tonga_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -static int 
tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps); - uint8_t request; - - if (data->pspp_notify_required || - data->pcie_performance_request) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) { - data->pcie_performance_request = false; - return 0; - } - - if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { - if (PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - data->pcie_performance_request = false; - return 0; -} - -static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) -{ - int tmp_result, result = 0; - - tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); - } - - tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); - - tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); - - tmp_result = tonga_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); - - tmp_result = tonga_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); - - tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); - - tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); - - tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change 
after state change!", result = tmp_result); - } - - return result; -} - -/** -* Set maximum target operating fan output PWM -* -* @param pHwMgr: the address of the powerplay hardware manager. -* @param usMaxFanPwm: max operating fan PWM in percents -* @return The response that came from the SMC. -*/ -static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); -} - -int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) -{ - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ - tonga_notify_smc_display_change(hwmgr, false); - else - tonga_notify_smc_display_change(hwmgr, true); - - return 0; -} - -/** -* Programs the display gap -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always OK -*/ -int tonga_program_display_gap(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if(0 == refresh_rate) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - - if (num_active_displays == 1) - tonga_notify_smc_display_change(hwmgr, true); - - return 0; -} - -int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - - tonga_program_display_gap(hwmgr); - - /* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */ - return 0; -} - -/** -* Set maximum target operating fan output RPM -* -* @param pHwMgr: the address of the powerplay hardware manager. -* @param usMaxFanRpm: max operating fan RPM value. -* @return The response that came from the SMC. 
-*/ -static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1); -} - -uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr) -{ - uint32_t reference_clock; - uint32_t tc; - uint32_t divide; - - ATOM_FIRMWARE_INFO *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); - - if (tc) - return TCLK; - - fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, - &size, &frev, &crev); - - if (!fw_info) - return 0; - - reference_clock = le16_to_cpu(fw_info->usReferenceClock); - - divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); - - if (0 != divide) - return reference_clock / 4; - - return reference_clock; -} - -int tonga_dpm_set_interrupt_state(void *private_data, - unsigned src_id, unsigned type, - int enabled) -{ - uint32_t cg_thermal_int; - struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; - - if (hwmgr == NULL) - return -EINVAL; - - switch (type) { - case AMD_THERMAL_IRQ_LOW_TO_HIGH: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - - case AMD_THERMAL_IRQ_HIGH_TO_LOW: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - default: - break; - } - return 0; -} - -int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - int result; - const struct pp_interrupt_registration_info *info = - (const struct pp_interrupt_registration_info *)thermal_interrupt_info; - - if (info == NULL) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, - tonga_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, - tonga_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - return 0; -} - -bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0,0,NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - 
if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; -/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL - if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - cgs_get_min_clock_settings(hwmgr->device, &min_clocks); - if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) - is_update_required = true; -*/ - return is_update_required; -} - -static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1, - const struct tonga_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1); - const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2); - int i; - - if (equal == NULL || psa == NULL || psb == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); - *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - *equal &= (psa->acp_clk == psb->acp_clk); - - return 0; -} - -static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - tonga_fan_ctrl_stop_smc_fan_control(hwmgr); - tonga_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - case PP_PCIE: - { - uint32_t tmp = mask & 
data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_PCIE: - pcie_speed = tonga_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? 
"*" : ""); - break; - default: - break; - } - return size; -} - -static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct tonga_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct tonga_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} - -static const struct pp_hwmgr_func tonga_hwmgr_funcs = { - .backend_init = &tonga_hwmgr_backend_init, - .backend_fini = &tonga_hwmgr_backend_fini, - .asic_setup = &tonga_setup_asic_task, - .dynamic_state_management_enable = &tonga_enable_dpm_tasks, - .dynamic_state_management_disable = &tonga_disable_dpm_tasks, - .apply_state_adjust_rules = tonga_apply_state_adjust_rules, - .force_dpm_level = &tonga_force_dpm_level, - .power_state_set = tonga_set_power_state_tasks, - .get_power_state_size = tonga_get_power_state_size, - .get_mclk = tonga_dpm_get_mclk, - .get_sclk = tonga_dpm_get_sclk, - .patch_boot_state = tonga_dpm_patch_boot_state, - .get_pp_table_entry = tonga_get_pp_table_entry, - .get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0, - .print_current_perforce_level = tonga_print_current_perforce_level, - .powerdown_uvd = tonga_phm_powerdown_uvd, - .powergate_uvd = tonga_phm_powergate_uvd, - .powergate_vce = 
tonga_phm_powergate_vce, - .disable_clock_power_gating = tonga_phm_disable_clock_power_gating, - .update_clock_gatings = tonga_phm_update_clock_gatings, - .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = tonga_display_configuration_changed_task, - .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output, - .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output, - .get_temperature = tonga_thermal_get_temperature, - .stop_thermal_controller = tonga_thermal_stop_thermal_controller, - .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt, - .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration, - .check_states_equal = tonga_check_states_equal, - .set_fan_control_mode = tonga_set_fan_control_mode, - .get_fan_control_mode = tonga_get_fan_control_mode, - .force_clock_level = tonga_force_clock_level, - .print_clock_levels = tonga_print_clock_levels, - .get_sclk_od = tonga_get_sclk_od, - .set_sclk_od = tonga_set_sclk_od, - .get_mclk_od = tonga_get_mclk_od, - .set_mclk_od = tonga_set_mclk_od, -}; - -int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - hwmgr->hwmgr_func = &tonga_hwmgr_funcs; - hwmgr->pptable_func = &pptable_v1_0_funcs; - pp_tonga_thermal_initialize(hwmgr); - return 0; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h deleted file mode 100644 index fcad9426d3c1..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef TONGA_HWMGR_H -#define TONGA_HWMGR_H - -#include "hwmgr.h" -#include "smu72_discrete.h" -#include "ppatomctrl.h" -#include "ppinterrupt.h" -#include "tonga_powertune.h" -#include "pp_endian.h" - -#define TONGA_MAX_HARDWARE_POWERLEVELS 2 -#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 - -struct tonga_performance_level { - uint32_t memory_clock; - uint32_t engine_clock; - uint16_t pcie_gen; - uint16_t pcie_lane; -}; - -struct _phw_tonga_bacos { - uint32_t best_match; - uint32_t baco_flags; - struct tonga_performance_level performance_level; -}; -typedef struct _phw_tonga_bacos phw_tonga_bacos; - -struct _phw_tonga_uvd_clocks { - uint32_t VCLK; - uint32_t DCLK; -}; - -typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks; - -struct _phw_tonga_vce_clocks { - uint32_t EVCLK; - uint32_t ECCLK; -}; - -typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks; - -struct tonga_power_state { - uint32_t magic; - phw_tonga_uvd_clocks uvd_clocks; - phw_tonga_vce_clocks vce_clocks; - uint32_t sam_clk; - uint32_t acp_clk; - uint16_t performance_level_count; - bool dc_compatible; - uint32_t sclk_threshold; - struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS]; -}; - -struct _phw_tonga_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; -typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level; - -#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define TONGA_MINIMUM_ENGINE_CLOCK 2500 - -struct tonga_single_dpm_table { - uint32_t count; - phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct tonga_dpm_table { - struct tonga_single_dpm_table sclk_table; - struct tonga_single_dpm_table mclk_table; - struct tonga_single_dpm_table pcie_speed_table; - struct tonga_single_dpm_table vddc_table; - struct tonga_single_dpm_table vdd_gfx_table; - struct tonga_single_dpm_table vdd_ci_table; - struct tonga_single_dpm_table mvdd_table; -}; -typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table; - - -struct _phw_tonga_clock_regisiters { - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t vDLL_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_SS1; - uint32_t vMPLL_SS2; -}; -typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers; - -struct _phw_tonga_voltage_smio_registers { - uint32_t vs0_vid_lower_smio_cntl; -}; -typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers; - - -struct _phw_tonga_mc_reg_entry { - uint32_t mclk_max; - uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; -typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry; - -struct _phw_tonga_mc_reg_table { - uint8_t last; /* number of registers*/ - uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ - uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. 
bit0->address[0], bit1->address[1], etc.*/ - phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; -typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table; - -#define DISABLE_MC_LOADMICROCODE 1 -#define DISABLE_MC_CFGPROGRAMMING 2 - -/*Ultra Low Voltage parameter structure */ -struct _phw_tonga_ulv_parm{ - bool ulv_supported; - uint32_t ch_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct tonga_performance_level ulv_power_level; -}; -typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm; - -#define TONGA_MAX_LEAKAGE_COUNT 8 - -struct _phw_tonga_leakage_voltage { - uint16_t count; - uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT]; -}; -typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage; - -struct _phw_tonga_display_timing { - uint32_t min_clock_insr; - uint32_t num_existing_displays; -}; -typedef struct _phw_tonga_display_timing phw_tonga_display_timing; - -struct _phw_tonga_dpmlevel_enable_mask { - uint32_t uvd_dpm_enable_mask; - uint32_t vce_dpm_enable_mask; - uint32_t acp_dpm_enable_mask; - uint32_t samu_dpm_enable_mask; - uint32_t sclk_dpm_enable_mask; - uint32_t mclk_dpm_enable_mask; - uint32_t pcie_dpm_enable_mask; -}; -typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask; - -struct _phw_tonga_pcie_perf_range { - uint16_t max; - uint16_t min; -}; -typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range; - -struct _phw_tonga_vbios_boot_state { - uint16_t mvdd_bootup_value; - uint16_t vddc_bootup_value; - uint16_t vddci_bootup_value; - uint16_t vddgfx_bootup_value; - uint32_t sclk_bootup_value; - uint32_t mclk_bootup_value; - uint16_t pcie_gen_bootup_value; - uint16_t pcie_lane_bootup_value; -}; -typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state; - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -/* We need to review which fields are needed. */ -/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
*/ -struct tonga_hwmgr { - struct tonga_dpm_table dpm_table; - struct tonga_dpm_table golden_dpm_table; - - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; - uint32_t static_screen_threshold_unit; - uint32_t static_screen_threshold; - uint32_t voltage_control; - uint32_t vdd_gfx_control; - - uint32_t vddc_vddci_delta; - uint32_t vddc_vddgfx_delta; - - struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; - struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; - struct pp_interrupt_registration_info smc_to_host_interrupt_info; - uint32_t active_auto_throttle_sources; - - struct pp_interrupt_registration_info external_throttle_interrupt; - irq_handler_func_t external_throttle_callback; - void *external_throttle_context; - - struct pp_interrupt_registration_info ctf_interrupt_info; - irq_handler_func_t ctf_callback; - void *ctf_context; - - phw_tonga_clock_registers clock_registers; - phw_tonga_voltage_smio_registers voltage_smio_registers; - - bool is_memory_GDDR5; - uint16_t acpi_vddc; - bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ - uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ - uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ - uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ - uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ - uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ - phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ - phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ - phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ - - uint32_t mvdd_control; - uint32_t vddc_mask_low; - uint32_t mvdd_mask_low; - uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ - uint16_t min_vddc_in_pp_table; - uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ - uint16_t min_vddci_in_pp_table; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edc_wr_enable_threshold; - bool is_uvd_enabled; - bool is_xdma_enabled; - phw_tonga_vbios_boot_state vbios_boot_state; - - bool battery_state; - bool is_tlu_enabled; - bool pcie_performance_request; - - /* -------------- SMC SRAM Address of firmware header tables ----------------*/ - uint32_t sram_end; /* The first address after the SMC SRAM. */ - uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ - uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ - uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ - uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ - uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ - SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ - SMU72_Discrete_MCRegisters mc_reg_table; - SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ - /* -------------- Stuff originally coming from Evergreen --------------------*/ - phw_tonga_mc_reg_table tonga_mc_reg_table; - uint32_t vdd_ci_control; - pp_atomctrl_voltage_table vddc_voltage_table; - pp_atomctrl_voltage_table vddci_voltage_table; - pp_atomctrl_voltage_table vddgfx_voltage_table; - pp_atomctrl_voltage_table mvdd_voltage_table; - - uint32_t mgcg_cgtt_local2; - uint32_t mgcg_cgtt_local3; - uint32_t gpio_debug; - uint32_t mc_micro_code_feature; - uint32_t highest_mclk; - uint16_t acpi_vdd_ci; - uint8_t mvdd_high_index; - uint8_t mvdd_low_index; - bool dll_defaule_on; - bool performance_request_registered; - - - /* ----------------- Low Power Features ---------------------*/ - phw_tonga_bacos bacos; - phw_tonga_ulv_parm ulv; - /* ----------------- CAC Stuff ---------------------*/ - uint32_t cac_table_start; - bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ - bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ - bool cac_enabled; - /* ----------------- DPM2 Parameters ---------------------*/ - uint32_t power_containment_features; - bool enable_bapm_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool disable_uvd_power_tune_feature; - struct tonga_pt_defaults *power_tune_defaults; - SMU72_Discrete_PmFuses power_tune_table; - uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ - uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ - - - bool enable_dte_feature; - - - /* ----------------- Phase Shedding ---------------------*/ - bool vddc_phase_shed_control; - /* --------------------- DI/DT --------------------------*/ - phw_tonga_display_timing display_timing; - /* --------- ReadRegistry data for memory and engine clock margins ---- */ - uint32_t engine_clock_data; - uint32_t memory_clock_data; - /* -------- Thermal Temperature Setting --------------*/ - phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask; - uint32_t need_update_smu7_dpm_table; - uint32_t sclk_dpm_key_disabled; - uint32_t mclk_dpm_key_disabled; - uint32_t pcie_dpm_key_disabled; - uint32_t min_engine_clocks; /* used to store the previous dal min sclock */ - phw_tonga_pcie_perf_range pcie_gen_performance; - phw_tonga_pcie_perf_range pcie_lane_performance; - phw_tonga_pcie_perf_range pcie_gen_power_saving; - phw_tonga_pcie_perf_range pcie_lane_power_saving; - bool use_pcie_performance_levels; - bool use_pcie_power_saving_levels; - uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */ - uint32_t mclk_activity_target; - uint32_t low_sclk_interrupt_threshold; - uint32_t last_mclk_dpm_enable_mask; - bool uvd_enabled; - uint32_t pcc_monitor_enabled; - - /* --------- Power Gating States ------------*/ - bool uvd_power_gated; /* 1: gated, 0:not gated */ - bool vce_power_gated; /* 1: gated, 0:not gated */ - bool samu_power_gated; /* 1: gated, 0:not gated */ - bool acp_power_gated; /* 1: gated, 0:not gated */ - bool pg_acp_init; -}; - -typedef struct tonga_hwmgr tonga_hwmgr; - -#define TONGA_DPM2_NEAR_TDP_DEC 10 -#define TONGA_DPM2_ABOVE_SAFE_INC 5 -#define TONGA_DPM2_BELOW_SAFE_INC 20 - -#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. 
*/ - -#define TONGA_DPM2_LTS_TRUNCATE 0 - -#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */ - -#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */ -#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */ - -#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50 - -#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF -#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12 -#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 -#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E -#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF - -#define TONGA_VOLTAGE_CONTROL_NONE 0x0 -#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2 -#define TONGA_VOLTAGE_CONTROL_MERGED 0x3 - -#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */ - -#define TONGA_UNUSED_GPIO_PIN 0x7F - -int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); -int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); -int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable); -int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); -uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c deleted file mode 100644 index 24d9a05e7997..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "hwmgr.h" -#include "smumgr.h" -#include "tonga_hwmgr.h" -#include "tonga_powertune.h" -#include "tonga_smumgr.h" -#include "smu72_discrete.h" -#include "pp_debug.h" -#include "tonga_ppsmc.h" - -#define VOLTAGE_SCALE 4 -#define POWERTUNE_DEFAULT_SET_MAX 1 - -struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { -/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ - {1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, - {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, - {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, -}; - -void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t tmp = 0; - - if (table_info && - table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && - table_info->cac_dtp_table->usPowerTuneDataSetID) - tonga_hwmgr->power_tune_defaults = - &tonga_power_tune_data_set_array - [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; - else - tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0]; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - tonga_hwmgr->dte_tj_offset = tmp; - - if (!tmp) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - - tonga_hwmgr->fast_watermark_threshold = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - tmp = 1; - tonga_hwmgr->enable_dte_feature = tmp ? false : true; - tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false; - tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? 
true : false; - } - } -} - - -int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_pt_defaults *defaults = data->power_tune_defaults; - SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; - int i, j, k; - uint16_t *pdef1; - uint16_t *pdef2; - - - /* TDP number of fraction bits are changed from 8 to 7 for Fiji - * as requested by SMC team - */ - dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( - (uint16_t)(cac_dtp_table->usTDP * 256)); - dpm_table->TargetTdp = PP_HOST_TO_SMC_US( - (uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); - - PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, - "Target Operating Temp is out of Range!", - ); - - dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); - dpm_table->GpuTjHyst = 8; - - dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; - - dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); - pdef1 = defaults->bapmti_r; - pdef2 = defaults->bapmti_rc; - - for (i = 0; i < SMU72_DTE_ITERATIONS; i++) { - for (j = 0; j < SMU72_DTE_SOURCES; j++) { - for (k = 0; k < SMU72_DTE_SINKS; k++) { - dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); - dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); - pdef1++; - pdef2++; - } - } - } - - return 0; -} - -static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_pt_defaults *defaults = data->power_tune_defaults; - - data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; - data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC; - data->power_tune_table.SviLoadLineTrimVddC = 3; - data->power_tune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} - -static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) -{ - uint16_t tdc_limit; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - const struct tonga_pt_defaults *defaults = data->power_tune_defaults; - - /* TDC number of fraction bits are changed from 8 to 7 - * for Fiji as requested by SMC team - */ - tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256); - data->power_tune_table.TDC_VDDC_PkgLimit = - CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); - data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - defaults->tdc_vddc_throttle_release_limit_perc; - data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; - - return 0; -} - -static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_pt_defaults *defaults = data->power_tune_defaults; - uint32_t temp; - - if (tonga_read_smc_sram_dword(hwmgr->smumgr, - fuse_table_offset + - offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), - (uint32_t *)&temp, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", - return -EINVAL); - else - data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; - - return 0; -} - -static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) -{ - int i; - struct tonga_hwmgr *data = (struct tonga_hwmgr 
*)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.LPMLTemperatureScaler[i] = 0; - - return 0; -} - -static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if ((hwmgr->thermal_controller.advanceFanControlParameters. - usFanOutputSensitivity & (1 << 15)) || - (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0)) - hwmgr->thermal_controller.advanceFanControlParameters. - usFanOutputSensitivity = hwmgr->thermal_controller. - advanceFanControlParameters.usDefaultFanOutputSensitivity; - - data->power_tune_table.FuzzyFan_PwmSetDelta = - PP_HOST_TO_SMC_US(hwmgr->thermal_controller. - advanceFanControlParameters.usFanOutputSensitivity); - return 0; -} - -static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) -{ - int i; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.GnbLPML[i] = 0; - - return 0; -} - -static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - - hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); - lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); - - data->power_tune_table.BapmVddCBaseLeakageHiSidd = - CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); - data->power_tune_table.BapmVddCBaseLeakageLoSidd = - CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); - - return 0; -} - -int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t pm_fuse_table_offset; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to get pm_fuse_table_offset Failed!", - return -EINVAL); - - /* DW6 */ - if (tonga_populate_svi_load_line(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate SviLoadLine Failed!", - return -EINVAL); - /* DW7 */ - if (tonga_populate_tdc_limit(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TDCLimit Failed!", return -EINVAL); - /* DW8 */ - if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TdcWaterfallCtl Failed !", - return -EINVAL); - - /* DW9-DW12 */ - if (tonga_populate_temperature_scaler(hwmgr) != 0) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate LPMLTemperatureScaler Failed!", - return -EINVAL); - - /* DW13-DW14 */ - if (tonga_populate_fuzzy_fan(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate Fuzzy Fan Control parameters Failed!", - return -EINVAL); - - /* DW15-DW18 */ - if (tonga_populate_gnb_lpml(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Failed!", - return -EINVAL); - - /* DW19 */ - if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - 
PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - - /* DW20 */ - if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", - return -EINVAL); - - if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, - (uint8_t *)&data->power_tune_table, - sizeof(struct SMU72_Discrete_PmFuses), data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to download PmFuseTable Failed!", - return -EINVAL); - } - return 0; -} - -int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { - int smc_result; - - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableCac)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to enable CAC in SMC.", result = -1); - - data->cac_enabled = (smc_result == 0) ? true : false; - } - return result; -} - -int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC) && data->cac_enabled) { - int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableCac)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable CAC in SMC.", result = -1); - - data->cac_enabled = false; - } - return result; -} - -int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PkgPwrSetLimit, n); - return 0; -} - -static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) -{ - return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); -} - -int tonga_enable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int smc_result; - int result = 0; - - data->power_containment_features = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (data->enable_dte_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableDTE)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to enable DTE in SMC.", result = -1;); - if (smc_result == 0) - data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; - } - - if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to enable TDCLimit in SMC.", result = -1;); - if (smc_result == 0) - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_TDCLimit; - } - - if (data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to enable PkgPwrTracking in SMC.", result = -1;); - if (smc_result == 0) { - struct phm_cac_tdp_table *cac_table = - table_info->cac_dtp_table; - uint32_t default_limit = - 
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_PkgPwrLimit; - - if (tonga_set_power_limit(hwmgr, default_limit)) - printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); - } - } - } - return result; -} - -int tonga_disable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment) && - data->power_containment_features) { - int smc_result; - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_TDCLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable TDCLimit in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_DTE) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableDTE)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable DTE in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable PkgPwrTracking in SMC.", - result = smc_result); - } - data->power_containment_features = 0; - } - - return result; -} - -int tonga_power_control_set_level(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - int adjust_percent, target_tdp; - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - /* adjustment percentage has already been validated */ - adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? - hwmgr->platform_descriptor.TDPAdjustment : - (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; - result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); - } - - return result; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h deleted file mode 100644 index c8bdb92d81f4..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef TONGA_POWERTUNE_H -#define TONGA_POWERTUNE_H - -enum _phw_tonga_ptc_config_reg_type { - TONGA_CONFIGREG_MMR = 0, - TONGA_CONFIGREG_SMC_IND, - TONGA_CONFIGREG_DIDT_IND, - TONGA_CONFIGREG_CACHE, - - TONGA_CONFIGREG_MAX -}; -typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type; - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 - - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 - -struct tonga_pt_config_reg { - uint32_t Offset; - uint32_t Mask; - uint32_t Shift; - uint32_t Value; - phw_tonga_ptc_config_reg_type Type; -}; - -struct tonga_pt_defaults { - uint8_t svi_load_line_en; - uint8_t svi_load_line_vddC; - uint8_t tdc_vddc_throttle_release_limit_perc; - uint8_t tdc_mawt; - uint8_t tdc_waterfall_ctl; - uint8_t dte_ambient_temp_base; - uint32_t display_cac; - uint32_t bamp_temp_gradient; - uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; - uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; -}; - - - -void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); -int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); -int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr); -int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr); -int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr); -int tonga_enable_power_containment(struct pp_hwmgr *hwmgr); -int tonga_disable_power_containment(struct pp_hwmgr *hwmgr); -int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); -int tonga_power_control_set_level(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c deleted file mode 100644 index 47ef1ca2d78b..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c +++ /dev/null @@ -1,590 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include -#include "tonga_thermal.h" -#include "tonga_hwmgr.h" -#include "tonga_smumgr.h" -#include "tonga_ppsmc.h" -#include "smu/smu_7_1_2_d.h" -#include "smu/smu_7_1_2_sh_mask.h" - -/** -* Get Fan Speed Control Parameters. -* @param hwmgr the address of the powerplay hardware manager. -* @param pSpeed is the address of the structure where the result is to be placed. -* @exception Always succeeds except if we cannot zero out the output structure. -*/ -int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) -{ - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - fan_speed_info->supports_percent_read = true; - fan_speed_info->supports_percent_write = true; - fan_speed_info->min_percent = 0; - fan_speed_info->max_percent = 100; - - if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - fan_speed_info->supports_rpm_read = true; - fan_speed_info->supports_rpm_write = true; - fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; - fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; - } else { - fan_speed_info->min_rpm = 0; - fan_speed_info->max_rpm = 0; - } - - return 0; -} - -/** -* Get Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param pSpeed is the address of the structure where the result is to be placed. -* @exception Fails is the 100% setting appears to be 0. -*/ -int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); - - if (0 == duty100) - return -EINVAL; - - - tmp64 = (uint64_t)duty * 100; - do_div(tmp64, duty100); - *speed = (uint32_t)tmp64; - - if (*speed > 100) - *speed = 100; - - return 0; -} - -/** -* Get Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the address of the structure where the result is to be placed. -* @exception Returns not supported if no fan is found or if pulses per revolution are not set -*/ -int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - return 0; -} - -/** -* Set Fan Speed Control to static mode, so that the user can decide what speed to use. -* @param hwmgr the address of the powerplay hardware manager. -* mode the fan control mode, 0 default, 1 by percent, 5, by RPM -* @exception Should always succeed. 
-*/ -int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - - if (hwmgr->fan_ctrl_is_in_default_mode) { - hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); - hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); - hwmgr->fan_ctrl_is_in_default_mode = false; - } - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); - - return 0; -} - -/** -* Reset Fan Speed Control to default mode. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Should always succeed. -*/ -int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->fan_ctrl_is_in_default_mode) { - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); - hwmgr->fan_ctrl_is_in_default_mode = true; - } - - return 0; -} - -int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - int result; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; -/* - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM)) - hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM); - else - hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM); -*/ - } else { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; - } -/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. - if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0) - result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \ - hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL); -*/ - return result; -} - - -int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; -} - -/** -* Set Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (0% - 100%) to be set. -* @exception Fails is the 100% setting appears to be 0. 
-*/ -int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return -EINVAL; - - if (speed > 100) - speed = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - tonga_fan_ctrl_stop_smc_fan_control(hwmgr); - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - - if (0 == duty100) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - - return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reset Fan Speed to default. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Always succeeds. -*/ -int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) -{ - int result; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - if (0 == result) - result = tonga_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = tonga_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (min - max) to be set. -* @exception Fails is the speed not lie between min and max. -*/ -int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - return 0; -} - -/** -* Reads the remote temperature from the SIslands thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr) -{ - int temp; - - temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); - -/* Bit 9 means the reading is lower than the lowest usable value. */ - if (0 != (0x200 & temp)) - temp = TONGA_THERMAL_MAXIMUM_TEMP_READING; - else - temp = (temp & 0x1ff); - - temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return temp; -} - -/** -* Set the requested temperature range for high and low alert signals -* -* @param hwmgr The address of the hardware manager. -* @param range Temperature range to be programmed for high and low alert signals -* @exception PP_Result_BadInput if the input data is not valid. 
-*/ -static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) -{ - uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - if (low < low_temp) - low = low_temp; - if (high > high_temp) - high = high_temp; - - if (low > high) - return -EINVAL; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - - return 0; -} - -/** -* Programs thermal controller one-time setting registers -* -* @param hwmgr The address of the hardware manager. -*/ -static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_CTRL, EDGE_PER_REV, - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); - - return 0; -} - -/** -* Enable thermal alerts on the RV770 thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to enable internal thermal interrupts */ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; -} - -/** -* Disable thermal alerts on the RV770 thermal controller. -* @param hwmgr The address of the hardware manager. -*/ -static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to disable internal thermal interrupts */ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; -} - -/** -* Uninitialize the thermal controller. -* Currently just disables alerts. -* @param hwmgr The address of the hardware manager. -*/ -int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) -{ - int result = tonga_thermal_disable_alert(hwmgr); - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - tonga_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - return 0; - - if (0 == data->fan_table_start) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - - if (0 == duty100) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = tonga_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - - fan_table.FanControl_GL_Flag = 1; - - res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); -/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
- if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ - hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); - - if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ - hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); - - if (0 != res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); -*/ - return 0; -} - -/** -* Start the fan control on the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ -/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. - * Make sure that we still think controlling the fan is OK. -*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - tonga_fan_ctrl_start_smc_fan_control(hwmgr); - tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - } - - return 0; -} - -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; - - if (range == NULL) - return -EINVAL; - - return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return tonga_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return tonga_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return tonga_thermal_disable_alert(hwmgr); -} - -static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = { - { NULL, tf_tonga_thermal_initialize }, - { NULL, tf_tonga_thermal_set_temperature_range }, - { NULL, tf_tonga_thermal_enable_alert }, -/* We should restrict performance levels to low before we halt the SMC. - * On the other hand we are still in boot state when we do this so it would be pointless. - * If this assumption changes we have to revisit this table. - */ - { NULL, tf_tonga_thermal_setup_fan_table}, - { NULL, tf_tonga_thermal_start_smc_fan_control}, - { NULL, NULL } -}; - -static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - tonga_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = { - { NULL, tf_tonga_thermal_disable_alert}, - { NULL, tf_tonga_thermal_set_temperature_range}, - { NULL, tf_tonga_thermal_enable_alert}, - { NULL, NULL } -}; - -static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - tonga_thermal_set_temperature_range_master_list -}; - -int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - tonga_fan_ctrl_set_default_mode(hwmgr); - return 0; -} - -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); - - if (0 == result) { - result = phm_construct_table(hwmgr, - &tonga_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (0 != result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (0 == result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h deleted file mode 100644 index aa335f267e25..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef TONGA_THERMAL_H -#define TONGA_THERMAL_H - -#include "hwmgr.h" - -#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1 -#define TONGA_THERMAL_LOW_ALERT_MASK 0x2 - -#define TONGA_THERMAL_MINIMUM_TEMP_READING -256 -#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255 - -#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0 -#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); - -extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); -extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); - -#endif - From 865ab832ba78a1baf03fed90dccf5088e63a3aa3 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 9 Sep 2016 16:37:08 +0800 Subject: [PATCH 23/49] drm/amdgpu: implement raster configuration for gfx v6 This patch is to implement the raster configuration and harvested configuration of gfx v6. 
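For reference, the harvest path added below first folds the global RB enable
mask into per-shader-engine masks and only then patches the SE_MAP/PKR_MAP/
RB_MAP fields for engines that lost a backend.  A minimal standalone sketch of
that mask derivation (illustration only, not part of this patch; it assumes a
hypothetical 2-SE part with 2 RBs per SE and RB 2 harvested, i.e. rb_mask 0xb):

  #include <stdio.h>

  int main(void)
  {
          unsigned rb_per_se = 2, num_se = 2;
          unsigned rb_mask = 0xb;        /* RBs 0, 1 and 3 present; RB 2 harvested */
          unsigned se_mask[4], se;

          /* same recurrence as gfx_v6_0_write_harvested_raster_configs():
           * each SE's window is the previous, already-masked window shifted up */
          se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;        /* -> 0x3 */
          se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;     /* -> 0x8 */
          se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
          se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

          for (se = 0; se < num_se; se++)
                  printf("SE%u RB mask: 0x%x\n", se, se_mask[se]);
          return 0;
  }

With SE1 left holding only one of its two RBs, the per-SE loop below clears the
RB_MAP_PKR0 field for that engine and remaps it so rasterizer output is routed
to the surviving backend, instead of writing the unmodified default value.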
Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Acked-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 131 +++++++++++++++++- drivers/gpu/drm/amd/include/asic_reg/si/sid.h | 35 +++++ 2 files changed, 165 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 410b29c05671..40abb6b81c09 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -931,6 +931,123 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev, return data & mask; } +static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf) +{ + switch (adev->asic_type) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) | + SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2); + break; + case CHIP_VERDE: + *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1); + break; + case CHIP_OLAND: + *rconf |= RB_YSEL; + break; + case CHIP_HAINAN: + *rconf |= 0x0; + break; + default: + DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); + break; + } +} + +static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, + u32 raster_config, unsigned rb_mask, + unsigned num_rb) +{ + unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1); + unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1); + unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2); + unsigned rb_per_se = num_rb / num_se; + unsigned se_mask[4]; + unsigned se; + + se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; + se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; + se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; + se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; + + WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4)); + WARN_ON(!(sh_per_se == 1 || sh_per_se == 2)); + WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2)); + + for (se = 0; se < num_se; se++) { + unsigned raster_config_se = raster_config; + unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); + unsigned pkr1_mask = pkr0_mask << rb_per_pkr; + int idx = (se / 2) * 2; + + if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { + raster_config_se &= ~SE_MAP_MASK; + + if (!se_mask[idx]) { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); + } else { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); + } + } + + pkr0_mask &= rb_mask; + pkr1_mask &= rb_mask; + if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { + raster_config_se &= ~PKR_MAP_MASK; + + if (!pkr0_mask) { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); + } else { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); + } + } + + if (rb_per_se >= 2) { + unsigned rb0_mask = 1 << (se * rb_per_se); + unsigned rb1_mask = rb0_mask << 1; + + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR0_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); + } + } + + if (rb_per_se > 2) { + rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); + rb1_mask = rb0_mask << 1; + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR1_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); + } + } + } + } + + /* GRBM_GFX_INDEX has a different offset on SI */ + 
gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + WREG32(PA_SC_RASTER_CONFIG, raster_config_se); + } + + /* GRBM_GFX_INDEX has a different offset on SI */ + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +} + static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, u32 se_num, u32 sh_per_se, u32 max_rb_num_per_se) @@ -939,6 +1056,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, u32 data, mask; u32 disabled_rbs = 0; u32 enabled_rbs = 0; + unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < se_num; i++) { @@ -961,6 +1079,9 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, adev->gfx.config.backend_enable_mask = enabled_rbs; adev->gfx.config.num_rbs = hweight32(enabled_rbs); + num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines, 16); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < se_num; i++) { gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); @@ -980,7 +1101,15 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, } enabled_rbs >>= 2; } - WREG32(PA_SC_RASTER_CONFIG, data); + gfx_v6_0_raster_config(adev, &data); + + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) + WREG32(PA_SC_RASTER_CONFIG, data); + else + gfx_v6_0_write_harvested_raster_configs(adev, data, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); } gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h index 8c5608a4d526..c57eff159374 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h +++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h @@ -1398,10 +1398,45 @@ #define DB_DEPTH_INFO 0xA00F #define PA_SC_RASTER_CONFIG 0xA0D4 +# define RB_MAP_PKR0(x) ((x) << 0) +# define RB_MAP_PKR0_MASK (0x3 << 0) +# define RB_MAP_PKR1(x) ((x) << 2) +# define RB_MAP_PKR1_MASK (0x3 << 2) # define RASTER_CONFIG_RB_MAP_0 0 # define RASTER_CONFIG_RB_MAP_1 1 # define RASTER_CONFIG_RB_MAP_2 2 # define RASTER_CONFIG_RB_MAP_3 3 +# define RB_XSEL2(x) ((x) << 4) +# define RB_XSEL2_MASK (0x3 << 4) +# define RB_XSEL (1 << 6) +# define RB_YSEL (1 << 7) +# define PKR_MAP(x) ((x) << 8) +# define PKR_MAP_MASK (0x3 << 8) +# define RASTER_CONFIG_PKR_MAP_0 0 +# define RASTER_CONFIG_PKR_MAP_1 1 +# define RASTER_CONFIG_PKR_MAP_2 2 +# define RASTER_CONFIG_PKR_MAP_3 3 +# define PKR_XSEL(x) ((x) << 10) +# define PKR_XSEL_MASK (0x3 << 10) +# define PKR_YSEL(x) ((x) << 12) +# define PKR_YSEL_MASK (0x3 << 12) +# define SC_MAP(x) ((x) << 16) +# define SC_MAP_MASK (0x3 << 16) +# define SC_XSEL(x) ((x) << 18) +# define SC_XSEL_MASK (0x3 << 18) +# define SC_YSEL(x) ((x) << 20) +# define SC_YSEL_MASK (0x3 << 20) +# define SE_MAP(x) ((x) << 24) +# define SE_MAP_MASK (0x3 << 24) +# define RASTER_CONFIG_SE_MAP_0 0 +# define RASTER_CONFIG_SE_MAP_1 1 +# define RASTER_CONFIG_SE_MAP_2 2 +# define RASTER_CONFIG_SE_MAP_3 3 +# define SE_XSEL(x) ((x) << 26) +# define SE_XSEL_MASK (0x3 << 26) +# define SE_YSEL(x) ((x) << 28) +# define SE_YSEL_MASK (0x3 << 28) + #define VGT_EVENT_INITIATOR 0xA2A4 # define SAMPLE_STREAMOUTSTATS1 (1 << 0) From 0b2138a45516ba83445a3c9a8aa38a1fb42c71bc Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Wed, 14 Sep 2016 15:55:48 +0800 Subject: [PATCH 24/49] drm/amdgpu: implement raster configuration for gfx v7 This patch is to implement the raster configuration and harvested configuration of gfx v7. 
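Unlike gfx v6, gfx v7 additionally programs PA_SC_RASTER_CONFIG_1; in this patch its SE_PAIR_* fields only carry a non-zero value on Hawaii. The small sketch below only shows how the default register values from gfx_v7_0_raster_config() are composed from the bit-field macros this patch adds to cikd.h; the local macro copies exist purely so the sketch compiles on its own, and writing the values still requires the GRBM_GFX_INDEX selection done in the driver.

/* Standalone sketch: assemble the default PA_SC_RASTER_CONFIG(_1) values. */
#include <stdio.h>

#define RB_MAP_PKR0(x)   ((x) << 0)
#define RB_XSEL2(x)      ((x) << 4)
#define SE_MAP(x)        ((x) << 24)
#define SE_XSEL(x)       ((x) << 26)
#define SE_YSEL(x)       ((x) << 28)
#define SE_PAIR_MAP(x)   ((x) << 0)
#define SE_PAIR_XSEL(x)  ((x) << 2)
#define SE_PAIR_YSEL(x)  ((x) << 4)

int main(void)
{
	/* CHIP_BONAIRE default from gfx_v7_0_raster_config() */
	unsigned bonaire_rconf = RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
				 SE_XSEL(1) | SE_YSEL(1);
	/* CHIP_HAWAII second config register from the same function */
	unsigned hawaii_rconf1 = SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
				 SE_PAIR_YSEL(2);

	printf("PA_SC_RASTER_CONFIG   (Bonaire): 0x%08x\n", bonaire_rconf);
	printf("PA_SC_RASTER_CONFIG_1 (Hawaii) : 0x%08x\n", hawaii_rconf1);
	return 0;
}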
Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Acked-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cikd.h | 36 ++++++ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 160 +++++++++++++++++++++++++- 2 files changed, 195 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index c4f6f00d62bc..8659852aea9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -562,4 +562,40 @@ enum { MTYPE_NONCACHED = 3 }; +/* mmPA_SC_RASTER_CONFIG mask */ +#define RB_MAP_PKR0(x) ((x) << 0) +#define RB_MAP_PKR0_MASK (0x3 << 0) +#define RB_MAP_PKR1(x) ((x) << 2) +#define RB_MAP_PKR1_MASK (0x3 << 2) +#define RB_XSEL2(x) ((x) << 4) +#define RB_XSEL2_MASK (0x3 << 4) +#define RB_XSEL (1 << 6) +#define RB_YSEL (1 << 7) +#define PKR_MAP(x) ((x) << 8) +#define PKR_MAP_MASK (0x3 << 8) +#define PKR_XSEL(x) ((x) << 10) +#define PKR_XSEL_MASK (0x3 << 10) +#define PKR_YSEL(x) ((x) << 12) +#define PKR_YSEL_MASK (0x3 << 12) +#define SC_MAP(x) ((x) << 16) +#define SC_MAP_MASK (0x3 << 16) +#define SC_XSEL(x) ((x) << 18) +#define SC_XSEL_MASK (0x3 << 18) +#define SC_YSEL(x) ((x) << 20) +#define SC_YSEL_MASK (0x3 << 20) +#define SE_MAP(x) ((x) << 24) +#define SE_MAP_MASK (0x3 << 24) +#define SE_XSEL(x) ((x) << 26) +#define SE_XSEL_MASK (0x3 << 26) +#define SE_YSEL(x) ((x) << 28) +#define SE_YSEL_MASK (0x3 << 28) + +/* mmPA_SC_RASTER_CONFIG_1 mask */ +#define SE_PAIR_MAP(x) ((x) << 0) +#define SE_PAIR_MAP_MASK (0x3 << 0) +#define SE_PAIR_XSEL(x) ((x) << 2) +#define SE_PAIR_XSEL_MASK (0x3 << 2) +#define SE_PAIR_YSEL(x) ((x) << 4) +#define SE_PAIR_YSEL_MASK (0x3 << 4) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 90102f123bb8..32a676291e67 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev) return (~data) & mask; } +static void +gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) +{ + switch (adev->asic_type) { + case CHIP_BONAIRE: + *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | + SE_XSEL(1) | SE_YSEL(1); + *rconf1 |= 0x0; + break; + case CHIP_HAWAII: + *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) | + RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) | + PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) | + SE_YSEL(3); + *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) | + SE_PAIR_YSEL(2); + break; + case CHIP_KAVERI: + *rconf |= RB_MAP_PKR0(2); + *rconf1 |= 0x0; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + *rconf |= 0x0; + *rconf1 |= 0x0; + break; + default: + DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); + break; + } +} + +static void +gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev, + u32 raster_config, u32 raster_config_1, + unsigned rb_mask, unsigned num_rb) +{ + unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1); + unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1); + unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2); + unsigned rb_per_se = num_rb / num_se; + unsigned se_mask[4]; + unsigned se; + + se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; + se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; + se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; + se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; + + WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4)); + WARN_ON(!(sh_per_se == 1 || sh_per_se == 2)); + 
WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2)); + + if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || + (!se_mask[2] && !se_mask[3]))) { + raster_config_1 &= ~SE_PAIR_MAP_MASK; + + if (!se_mask[0] && !se_mask[1]) { + raster_config_1 |= + SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3); + } else { + raster_config_1 |= + SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0); + } + } + + for (se = 0; se < num_se; se++) { + unsigned raster_config_se = raster_config; + unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); + unsigned pkr1_mask = pkr0_mask << rb_per_pkr; + int idx = (se / 2) * 2; + + if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { + raster_config_se &= ~SE_MAP_MASK; + + if (!se_mask[idx]) { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); + } else { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); + } + } + + pkr0_mask &= rb_mask; + pkr1_mask &= rb_mask; + if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { + raster_config_se &= ~PKR_MAP_MASK; + + if (!pkr0_mask) { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); + } else { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); + } + } + + if (rb_per_se >= 2) { + unsigned rb0_mask = 1 << (se * rb_per_se); + unsigned rb1_mask = rb0_mask << 1; + + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR0_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); + } + } + + if (rb_per_se > 2) { + rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); + rb1_mask = rb0_mask << 1; + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR1_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); + } + } + } + } + + /* GRBM_GFX_INDEX has a different offset on CI+ */ + gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } + + /* GRBM_GFX_INDEX has a different offset on CI+ */ + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +} + /** * gfx_v7_0_setup_rb - setup the RBs on the asic * @@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) { int i, j; u32 data; + u32 raster_config = 0, raster_config_1 = 0; u32 active_rbs = 0; u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se; + unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { @@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) } } gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); + + num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines, 16); + + gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1); + + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) { + WREG32(mmPA_SC_RASTER_CONFIG, raster_config); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } else { + gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + 
mutex_unlock(&adev->grbm_idx_mutex); } /** From 167ac5733c70683c4886a8b3ef347cc6c93a76a6 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Wed, 14 Sep 2016 17:14:16 +0800 Subject: [PATCH 25/49] drm/amdgpu: implement raster configuration for gfx v8 This patch is to implement the raster configuration and harvested configuration of gfx v8. Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Acked-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 168 +++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/vid.h | 37 ++++++ 2 files changed, 204 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1e7c615c054e..6da7d94925eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3488,13 +3488,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) return (~data) & mask; } +static void +gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) +{ + switch (adev->asic_type) { + case CHIP_FIJI: + *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) | + RB_XSEL2(1) | PKR_MAP(2) | + PKR_XSEL(1) | PKR_YSEL(1) | + SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3); + *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) | + SE_PAIR_YSEL(2); + break; + case CHIP_TONGA: + case CHIP_POLARIS10: + *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | + SE_XSEL(1) | SE_YSEL(1); + *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) | + SE_PAIR_YSEL(2); + break; + case CHIP_TOPAZ: + case CHIP_CARRIZO: + *rconf |= RB_MAP_PKR0(2); + *rconf1 |= 0x0; + break; + case CHIP_POLARIS11: + *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | + SE_XSEL(1) | SE_YSEL(1); + *rconf1 |= 0x0; + break; + case CHIP_STONEY: + *rconf |= 0x0; + *rconf1 |= 0x0; + break; + default: + DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); + break; + } +} + +static void +gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev, + u32 raster_config, u32 raster_config_1, + unsigned rb_mask, unsigned num_rb) +{ + unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1); + unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1); + unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2); + unsigned rb_per_se = num_rb / num_se; + unsigned se_mask[4]; + unsigned se; + + se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; + se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; + se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; + se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; + + WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4)); + WARN_ON(!(sh_per_se == 1 || sh_per_se == 2)); + WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2)); + + if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || + (!se_mask[2] && !se_mask[3]))) { + raster_config_1 &= ~SE_PAIR_MAP_MASK; + + if (!se_mask[0] && !se_mask[1]) { + raster_config_1 |= + SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3); + } else { + raster_config_1 |= + SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0); + } + } + + for (se = 0; se < num_se; se++) { + unsigned raster_config_se = raster_config; + unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); + unsigned pkr1_mask = pkr0_mask << rb_per_pkr; + int idx = (se / 2) * 2; + + if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { + raster_config_se &= ~SE_MAP_MASK; + + if (!se_mask[idx]) { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); + } else { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); + } + } + + pkr0_mask &= rb_mask; + pkr1_mask &= 
rb_mask; + if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { + raster_config_se &= ~PKR_MAP_MASK; + + if (!pkr0_mask) { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); + } else { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); + } + } + + if (rb_per_se >= 2) { + unsigned rb0_mask = 1 << (se * rb_per_se); + unsigned rb1_mask = rb0_mask << 1; + + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR0_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); + } + } + + if (rb_per_se > 2) { + rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); + rb1_mask = rb0_mask << 1; + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR1_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); + } + } + } + } + + /* GRBM_GFX_INDEX has a different offset on VI */ + gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } + + /* GRBM_GFX_INDEX has a different offset on VI */ + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +} + static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) { int i, j; u32 data; + u32 raster_config = 0, raster_config_1 = 0; u32 active_rbs = 0; u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se; + unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { @@ -3506,10 +3656,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) } } gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); + + num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines, 16); + + gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1); + + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) { + WREG32(mmPA_SC_RASTER_CONFIG, raster_config); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } else { + gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + + mutex_unlock(&adev->grbm_idx_mutex); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index f62b261660d4..11746f22d0c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -373,4 +373,41 @@ #define VCE_CMD_WAIT_GE 0x00000106 #define VCE_CMD_UPDATE_PTB 0x00000107 #define VCE_CMD_FLUSH_TLB 0x00000108 + +/* mmPA_SC_RASTER_CONFIG mask */ +#define RB_MAP_PKR0(x) ((x) << 0) +#define RB_MAP_PKR0_MASK (0x3 << 0) +#define RB_MAP_PKR1(x) ((x) << 2) +#define RB_MAP_PKR1_MASK (0x3 << 2) +#define RB_XSEL2(x) ((x) << 4) +#define RB_XSEL2_MASK (0x3 << 4) +#define RB_XSEL (1 << 6) +#define RB_YSEL (1 << 7) +#define PKR_MAP(x) ((x) << 8) +#define PKR_MAP_MASK (0x3 << 8) +#define PKR_XSEL(x) ((x) << 10) +#define PKR_XSEL_MASK (0x3 << 10) +#define PKR_YSEL(x) ((x) << 12) +#define PKR_YSEL_MASK (0x3 << 12) +#define SC_MAP(x) ((x) << 16) +#define SC_MAP_MASK (0x3 << 16) +#define SC_XSEL(x) ((x) << 18) +#define 
SC_XSEL_MASK (0x3 << 18) +#define SC_YSEL(x) ((x) << 20) +#define SC_YSEL_MASK (0x3 << 20) +#define SE_MAP(x) ((x) << 24) +#define SE_MAP_MASK (0x3 << 24) +#define SE_XSEL(x) ((x) << 26) +#define SE_XSEL_MASK (0x3 << 26) +#define SE_YSEL(x) ((x) << 28) +#define SE_YSEL_MASK (0x3 << 28) + +/* mmPA_SC_RASTER_CONFIG_1 mask */ +#define SE_PAIR_MAP(x) ((x) << 0) +#define SE_PAIR_MAP_MASK (0x3 << 0) +#define SE_PAIR_XSEL(x) ((x) << 2) +#define SE_PAIR_XSEL_MASK (0x3 << 2) +#define SE_PAIR_YSEL(x) ((x) << 4) +#define SE_PAIR_YSEL_MASK (0x3 << 4) + #endif From a6e3695221446cf825d12db9c6ad3502c45fb9de Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 15 Sep 2016 10:07:34 -0400 Subject: [PATCH 26/49] drm/amd/powerplay: Add read_sensor() callback to hwmgr (v3) Provides standardized interface to read various sensors. The API is extensible (by adding to the end of the amd_pp_sensors enumeration list. Support has been added to Carrizo/smu7 (v2) Squashed the two sensor patches into one. (v3) Updated to apply to smu7_hwmgr instead Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 20 ++++ .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 96 +++++++++++++++++++ .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 36 +++++++ .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 12 +++ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 5 files changed, 165 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index b1d19409bf86..ee0368381e82 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -894,6 +894,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); } +static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->read_sensor == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -920,6 +939,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .set_sclk_od = pp_dpm_set_sclk_od, .get_mclk_od = pp_dpm_get_mclk_od, .set_mclk_od = pp_dpm_set_mclk_od, + .read_sensor = pp_dpm_read_sensor, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 5ecef1732e20..9f3c5a8a903c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1857,6 +1857,101 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c return 0; } +static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + uint32_t sclk_index = 
PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); + uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); + uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); + + uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent; + uint16_t vddnb, vddgfx; + int result; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + if (sclk_index < NUM_SCLK_LEVELS) { + sclk = table->entries[sclk_index].clk; + *value = sclk; + return 0; + } + return -EINVAL; + case AMDGPU_PP_SENSOR_VDDNB: + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; + vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); + *value = vddnb; + return 0; + case AMDGPU_PP_SENSOR_VDDGFX: + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; + vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); + *value = vddgfx; + return 0; + case AMDGPU_PP_SENSOR_UVD_VCLK: + if (!cz_hwmgr->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + return -EINVAL; + } else { + vclk = uvd_table->entries[uvd_index].vclk; + *value = vclk; + return 0; + } + } + *value = 0; + return 0; + case AMDGPU_PP_SENSOR_UVD_DCLK: + if (!cz_hwmgr->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + return -EINVAL; + } else { + dclk = uvd_table->entries[uvd_index].dclk; + *value = dclk; + return 0; + } + } + *value = 0; + return 0; + case AMDGPU_PP_SENSOR_VCE_ECCLK: + if (!cz_hwmgr->vce_power_gated) { + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + return -EINVAL; + } else { + ecclk = vce_table->entries[vce_index].ecclk; + *value = ecclk; + return 0; + } + } + *value = 0; + return 0; + case AMDGPU_PP_SENSOR_GPU_LOAD: + result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); + if (0 == result) { + activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); + activity_percent = activity_percent > 100 ? 100 : activity_percent; + } else { + activity_percent = 50; + } + *value = activity_percent; + return 0; + default: + return -EINVAL; + } +} + static const struct pp_hwmgr_func cz_hwmgr_funcs = { .backend_init = cz_hwmgr_backend_init, .backend_fini = cz_hwmgr_backend_fini, @@ -1882,6 +1977,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, .get_clock_by_type = cz_get_clock_by_type, .get_max_high_clocks = cz_get_max_high_clocks, + .read_sensor = cz_read_sensor, }; int cz_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f67e1e260b30..07a7d046d6f6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3144,6 +3144,41 @@ smu7_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); } +static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *value = sclk; + return 0; + case AMDGPU_PP_SENSOR_GFX_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *value = mclk; + return 0; + case AMDGPU_PP_SENSOR_GPU_LOAD: + offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + AverageGraphicsActivity); + + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + *value = activity_percent > 100 ? 100 : activity_percent; + return 0; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *value = smu7_thermal_get_temperature(hwmgr); + return 0; + default: + return -EINVAL; + } +} + static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) { const struct phm_set_power_state_input *states = @@ -4315,6 +4350,7 @@ static struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_mclk_od = smu7_get_mclk_od, .set_mclk_od = smu7_set_mclk_od, .get_clock_by_type = smu7_get_clock_by_type, + .read_sensor = smu7_read_sensor, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index f941acf563a9..dfa0f38a5e76 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -29,6 +29,17 @@ #include "amd_shared.h" #include "cgs_common.h" +enum amd_pp_sensors { + AMDGPU_PP_SENSOR_GFX_SCLK = 0, + AMDGPU_PP_SENSOR_VDDNB, + AMDGPU_PP_SENSOR_VDDGFX, + AMDGPU_PP_SENSOR_UVD_VCLK, + AMDGPU_PP_SENSOR_UVD_DCLK, + AMDGPU_PP_SENSOR_VCE_ECCLK, + AMDGPU_PP_SENSOR_GPU_LOAD, + AMDGPU_PP_SENSOR_GFX_MCLK, + AMDGPU_PP_SENSOR_GPU_TEMP, +}; enum amd_pp_event { AMD_PP_EVENT_INITIALIZE = 0, @@ -347,6 +358,7 @@ struct amd_powerplay_funcs { int (*set_sclk_od)(void *handle, uint32_t value); int (*get_mclk_od)(void *handle); int (*set_mclk_od)(void *handle, uint32_t value); + int (*read_sensor)(void *handle, int idx, int32_t *value); }; struct amd_powerplay { diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index c9628b4db2c3..fcd45452380d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -359,6 +359,7 @@ struct pp_hwmgr_func { int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*get_mclk_od)(struct pp_hwmgr *hwmgr); int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value); }; struct pp_table_func { From f2cdaf20664525227f721ac3a4c72ee8ef4b37b8 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 15 Sep 2016 10:08:44 -0400 Subject: [PATCH 27/49] drm/amd/amdgpu: Hook up read_sensor() to debugfs (v2) (v2) Tidy'ed up read function. 
Signed-off-by: Tom St Denis Reviewed-by: Edward O'Callaghan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 31 ++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 377d81875c6d..490f04f0efe8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2771,6 +2771,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return result; } +static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + int idx, r; + int32_t value; + + if (size != 4 || *pos & 0x3) + return -EINVAL; + + /* convert offset to sensor number */ + idx = *pos >> 2; + + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) + r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value); + else + return -EINVAL; + + if (!r) + r = put_user(value, (int32_t *)buf); + + return !r ? 4 : r; +} static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, @@ -2803,12 +2826,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_sensors_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_sensor_read, + .llseek = default_llseek +}; + static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, &amdgpu_debugfs_gca_config_fops, + &amdgpu_debugfs_sensors_fops, }; static const char *debugfs_regs_names[] = { @@ -2817,6 +2847,7 @@ static const char *debugfs_regs_names[] = { "amdgpu_regs_pcie", "amdgpu_regs_smc", "amdgpu_gca_config", + "amdgpu_sensors", }; static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) From ecab76688a0785fbcddcd1ff638a3fc76db073aa Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 18 Sep 2016 17:00:52 +0200 Subject: [PATCH 28/49] drm/amdgpu: Use kmalloc_array() in amdgpu_debugfs_gca_config_read() A multiplication for the size determination of a memory allocation indicated that an array data structure should be processed. Thus use the corresponding function "kmalloc_array". This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 490f04f0efe8..d97efc1a3109 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2712,7 +2712,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; - config = kmalloc(256 * sizeof(*config), GFP_KERNEL); + config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); if (!config) return -ENOMEM; From 4e99a44e37bfed8c4f25c94687e8e4ac4ae65086 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 31 Mar 2016 13:26:59 +0800 Subject: [PATCH 29/49] drm/amdgpu:changes of virtualization cases probe (v3) 1,Changes on virtualization detections 2,Don't load smu & mc firmware if using sr-iov bios 3,skip vPost for sriov & force vPost if dev pass-through v2: agd: squash in Rays's fix for the missed SI case v3: agd: squash in additional fixes for CIK, SI, cleanup Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 36 +++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- drivers/gpu/drm/amd/amdgpu/cik.c | 14 ++++----- drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 ++- drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 2 +- drivers/gpu/drm/amd/amdgpu/si.c | 14 ++++----- drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 2 +- drivers/gpu/drm/amd/amdgpu/vi.c | 27 ++++++++++------ 10 files changed, 81 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ee45d9f7f3dc..fb8d6030a64d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1827,6 +1827,7 @@ struct amdgpu_asic_funcs { bool (*read_disabled_bios)(struct amdgpu_device *adev); bool (*read_bios_from_rom)(struct amdgpu_device *adev, u8 *bios, u32 length_bytes); + void (*detect_hw_virtualization) (struct amdgpu_device *adev); int (*read_register)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); @@ -1836,8 +1837,6 @@ struct amdgpu_asic_funcs { /* MM block clocks */ int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); - /* query virtual capabilities */ - u32 (*get_virtual_caps)(struct amdgpu_device *adev); /* static power management */ int (*get_pcie_lanes)(struct amdgpu_device *adev); void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); @@ -1934,15 +1933,36 @@ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); +#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */ +#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ +#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ +#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ /* GPU virtualization */ -#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) -#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) struct amdgpu_virtualization { - bool supports_sr_iov; - bool is_virtual; - u32 caps; 
+ uint32_t virtual_caps; }; +#define amdgpu_sriov_enabled(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) + +#define amdgpu_sriov_vf(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF) + +#define amdgpu_sriov_bios(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) + +#define amdgpu_passthrough(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE) + +static inline bool is_virtual_machine(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /* * Core structure, functions and helpers. */ @@ -2260,12 +2280,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) -#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) +#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d97efc1a3109..4acc92b9eec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -110,7 +110,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, bool always_indirect) { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -1485,13 +1485,10 @@ static int amdgpu_resume(struct amdgpu_device *adev) return 0; } -static bool amdgpu_device_is_virtual(void) +static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { -#ifdef CONFIG_X86 - return boot_cpu_has(X86_FEATURE_HYPERVISOR); -#else - return false; -#endif + if (amdgpu_atombios_has_gpu_virtualization_table(adev)) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; } /** @@ -1648,25 +1645,25 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } - /* See if the asic supports SR-IOV */ - adev->virtualization.supports_sr_iov = - amdgpu_atombios_has_gpu_virtualization_table(adev); - - /* Check if we are executing in a virtualized environment */ - adev->virtualization.is_virtual = amdgpu_device_is_virtual(); - adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); + /* detect if we are with an SRIOV vbios */ + amdgpu_device_detect_sriov_bios(adev); /* Post card if necessary */ - if (!amdgpu_card_posted(adev) 
|| - (adev->virtualization.is_virtual && - !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { + if (!amdgpu_sriov_vf(adev) && + (!amdgpu_card_posted(adev) || amdgpu_passthrough(adev))) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); r = -EINVAL; goto failed; } DRM_INFO("GPU not posted. posting now...\n"); - amdgpu_atom_asic_init(adev->mode_info.atom_context); + r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (r) { + dev_err(adev->dev, "gpu post error!\n"); + goto failed; + } + } else { + DRM_INFO("GPU post is not needed\n"); } /* Initialize clocks */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index c96ae105b764..0c5f36d1ea3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -485,7 +485,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown */ - if (adev->virtualization.is_virtual) + if (amdgpu_passthrough(adev)) amdgpu_pci_remove(pdev); } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 825de800b798..a845b6a93b79 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -963,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static u32 cik_get_virtual_caps(struct amdgpu_device *adev) -{ - /* CIK does not support SR-IOV */ - return 0; -} - static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS, false}, {mmGB_ADDR_CONFIG, false}, @@ -1641,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } +static void cik_detect_hw_virtualization(struct amdgpu_device *adev) +{ + if (is_virtual_machine()) /* passthrough mode */ + adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; +} + static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = { /* ORDER MATTERS! */ @@ -2384,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, .read_bios_from_rom = &cik_read_bios_from_rom, + .detect_hw_virtualization = cik_detect_hw_virtualization, .read_register = &cik_read_register, .reset = &cik_asic_reset, .set_vga_state = &cik_vga_set_state, .get_xclk = &cik_get_xclk, .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, - .get_virtual_caps = &cik_get_virtual_caps, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index b3e19ba4c57f..8cfb0a3cf725 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -275,7 +275,7 @@ static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) /* Skip SMC ucode loading on SR-IOV capable boards. * vbios does this for us in asic_init in that case. */ - if (adev->virtualization.supports_sr_iov) + if (amdgpu_sriov_bios(adev)) return 0; hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 8e7127f09ff6..6ec8e01109aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -261,8 +261,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) /* Skip MC ucode loading on SR-IOV capable boards. 
* vbios does this for us in asic_init in that case. + * Skip MC ucode loading on VF, because hypervisor will do that + * for this adaptor. */ - if (adev->virtualization.supports_sr_iov) + if (amdgpu_sriov_bios(adev)) return 0; hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index ef7c27d7356a..c6e004a3f557 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -282,7 +282,7 @@ static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) /* Skip SMC ucode loading on SR-IOV capable boards. * vbios does this for us in asic_init in that case. */ - if (adev->virtualization.supports_sr_iov) + if (amdgpu_sriov_bios(adev)) return 0; hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index fee76b8a536f..dc9511c5ecb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -952,12 +952,6 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } -static u32 si_get_virtual_caps(struct amdgpu_device *adev) -{ - /* SI does not support SR-IOV */ - return 0; -} - static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {GRBM_STATUS, false}, {GB_ADDR_CONFIG, false}, @@ -1124,16 +1118,22 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) return 0; } +static void si_detect_hw_virtualization(struct amdgpu_device *adev) +{ + if (is_virtual_machine()) /* passthrough mode */ + adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; +} + static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, + .detect_hw_virtualization = si_detect_hw_virtualization, .read_register = &si_read_register, .reset = &si_asic_reset, .set_vga_state = &si_vga_set_state, .get_xclk = &si_get_xclk, .set_uvd_clocks = &si_set_uvd_clocks, .set_vce_clocks = NULL, - .get_virtual_caps = &si_get_virtual_caps, }; static uint32_t si_get_rev_id(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 940de1836f8f..1e71e819468b 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -275,7 +275,7 @@ static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev) /* Skip SMC ucode loading on SR-IOV capable boards. * vbios does this for us in asic_init in that case. 
*/ - if (adev->virtualization.supports_sr_iov) + if (amdgpu_sriov_bios(adev)) return 0; hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index b688e2f77419..a8154d0ac288 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -445,18 +445,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static u32 vi_get_virtual_caps(struct amdgpu_device *adev) +static void vi_detect_hw_virtualization(struct amdgpu_device *adev) { - u32 caps = 0; - u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + /* bit0: 0 means pf and 1 means vf */ + /* bit31: 0 means disable IOV and 1 means enable */ + if (reg & 1) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF; - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) - caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; + if (reg & 0x80000000) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) - caps |= AMDGPU_VIRT_CAPS_IS_VF; - - return caps; + if (reg == 0) { + if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ + adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; + } } static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { @@ -1521,13 +1524,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, .read_bios_from_rom = &vi_read_bios_from_rom, + .detect_hw_virtualization = vi_detect_hw_virtualization, .read_register = &vi_read_register, .reset = &vi_asic_reset, .set_vga_state = &vi_vga_set_state, .get_xclk = &vi_get_xclk, .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, - .get_virtual_caps = &vi_get_virtual_caps, }; static int vi_common_early_init(void *handle) @@ -1657,6 +1660,10 @@ static int vi_common_early_init(void *handle) return -EINVAL; } + /* in early init stage, vbios code won't work */ + if (adev->asic_funcs->detect_hw_virtualization) + amdgpu_asic_detect_hw_virtualization(adev); + if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; From bec86378befae4155b58f80bb9d0da50080291e6 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 14 Sep 2016 19:38:08 +0800 Subject: [PATCH 30/49] drm/amdgpu:determine if vPost is needed indeed Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 48 ++++++++++++++++++++-- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4acc92b9eec6..75f490f9bd8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -50,6 +50,7 @@ #include "vi.h" #include "bif/bif_4_1_d.h" #include +#include static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); @@ -651,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev) } +static bool amdgpu_vpost_needed(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return false; + + if (amdgpu_passthrough(adev)) { + /* for FIJI: In whole GPU pass-through virtualization case + * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) + * so amdgpu_card_posted return false and driver will incorrectly skip vPost. 
+ * but if we force vPost do in pass-through case, the driver reload will hang. + * whether doing vPost depends on amdgpu_card_posted if smc version is above + * 00160e00 for FIJI. + */ + if (adev->asic_type == CHIP_FIJI) { + int err; + uint32_t fw_ver; + err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); + /* force vPost if error occured */ + if (err) + return true; + + fw_ver = *((uint32_t *)adev->pm.fw->data + 69); + if (fw_ver >= 0x00160e00) + return !amdgpu_card_posted(adev); + } + } else { + /* in bare-metal case, amdgpu_card_posted return false + * after system reboot/boot, and return true if driver + * reloaded. + * we shouldn't do vPost after driver reload otherwise GPU + * could hang. + */ + if (amdgpu_card_posted(adev)) + return false; + } + + /* we assume vPost is neede for all other cases */ + return true; +} + /** * amdgpu_dummy_page_init - init dummy page used by the driver * @@ -1649,14 +1690,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_device_detect_sriov_bios(adev); /* Post card if necessary */ - if (!amdgpu_sriov_vf(adev) && - (!amdgpu_card_posted(adev) || amdgpu_passthrough(adev))) { + if (amdgpu_vpost_needed(adev)) { if (!adev->bios) { - dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); + dev_err(adev->dev, "no vBIOS found\n"); r = -EINVAL; goto failed; } - DRM_INFO("GPU not posted. posting now...\n"); + DRM_INFO("GPU posting now...\n"); r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) { dev_err(adev->dev, "gpu post error!\n"); From fc76cbf45651f58284b8035ae1938e8ff5d19ee7 Mon Sep 17 00:00:00 2001 From: Frank Min Date: Wed, 27 Apr 2016 18:53:29 +0800 Subject: [PATCH 31/49] drm/amdgpu:add fw version entry to info Signed-off-by: Frank Min Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 42 +++++++++++++++++++ drivers/gpu/drm/amd/include/cgs_common.h | 1 + .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 0 3 files changed, 43 insertions(+) mode change 100644 => 100755 drivers/gpu/drm/amd/include/cgs_common.h mode change 100644 => 100755 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index f1c53a2b09c6..7a8bfa34682f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode return -EINVAL; } +static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device, + enum cgs_ucode_id type) +{ + CGS_FUNC_ADEV; + uint16_t fw_version; + + switch (type) { + case CGS_UCODE_ID_SDMA0: + fw_version = adev->sdma.instance[0].fw_version; + break; + case CGS_UCODE_ID_SDMA1: + fw_version = adev->sdma.instance[1].fw_version; + break; + case CGS_UCODE_ID_CP_CE: + fw_version = adev->gfx.ce_fw_version; + break; + case CGS_UCODE_ID_CP_PFP: + fw_version = adev->gfx.pfp_fw_version; + break; + case CGS_UCODE_ID_CP_ME: + fw_version = adev->gfx.me_fw_version; + break; + case CGS_UCODE_ID_CP_MEC: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_CP_MEC_JT1: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_CP_MEC_JT2: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_RLC_G: + fw_version = adev->gfx.rlc_fw_version; + break; + default: + DRM_ERROR("firmware type %d do not have version\n", type); + fw_version = 0; + } + return fw_version; +} + static int 
amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->mc_addr = gpu_addr; info->image_size = data_size; info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + info->fw_version = amdgpu_get_firmware_version(cgs_device, type); info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); } else { char fw_name[30] = {0}; diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h old mode 100644 new mode 100755 index 6aa8938fd826..df7c18b6a02a --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -161,6 +161,7 @@ struct cgs_clock_limits { */ struct cgs_firmware_info { uint16_t version; + uint16_t fw_version; uint16_t feature_version; uint32_t image_size; uint64_t mc_addr; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c old mode 100644 new mode 100755 From 884031f0aacf57dad1575f96714efc80de9b19cc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Sep 2016 12:35:22 -0400 Subject: [PATCH 32/49] drm/radeon: narrow asic_init for virtualization Only needed on CIK+ due to the way pci reset is handled by the GPU. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_device.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bbc895891631..eb92aef46e3c 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -661,8 +661,9 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; - /* for pass through, always force asic_init */ - if (radeon_device_is_virtual()) + /* for pass through, always force asic_init for CI */ + if (rdev->family >= CHIP_BONAIRE && + radeon_device_is_virtual()) return false; /* required for EFI mode on macbook2,1 which uses an r5xx asic */ From 3de4ec57271a1979b5d960d8610939fff7dc38f9 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 19 Sep 2016 12:48:52 -0400 Subject: [PATCH 33/49] drm/amd/powerplay: Replace per-asic print_performance with generic Replace per-asic print_current_performance() functions with generic that calls read_sensor. Tested on Tonga and Carrizo for aesthetics and accuracy. 
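The generic printer relies on a few unit conventions for the raw read_sensor() values: clocks are divided by 100 to print MHz (i.e. they come back in 10 kHz units), temperature is divided by 1000 to print degrees C, and load is already a percentage. The sketch below simply replays that formatting on made-up raw numbers.

#include <stdio.h>

int main(void)
{
	unsigned sclk = 30000;   /* 10 kHz units -> 300 MHz (illustrative) */
	unsigned mclk = 40000;   /* 10 kHz units -> 400 MHz (illustrative) */
	unsigned temp = 45000;   /* millidegrees -> 45 C    (illustrative) */
	unsigned load = 17;      /* already a percentage    (illustrative) */

	printf("\t%u MHz (MCLK)\n", mclk / 100);
	printf("\t%u MHz (SCLK)\n", sclk / 100);
	printf("GPU Temperature: %u C\n", temp / 1000);
	printf("GPU Load: %u %%\n", load);
	return 0;
}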
Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 62 ++++++++++++++- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 23 ------ .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 79 ++----------------- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 39 ++------- .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 4 +- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 - 7 files changed, 79 insertions(+), 140 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index fb8d6030a64d..9f0049563b71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2343,6 +2343,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) +#define amdgpu_dpm_read_sensor(adev, idx, value) \ + ((adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \ + -EINVAL) + #define amdgpu_dpm_get_temperature(adev) \ ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ @@ -2394,11 +2399,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ (adev)->pm.funcs->powergate_vce((adev), (g))) -#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) - #define amdgpu_dpm_get_current_power_state(adev) \ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index d4ec3cb187a5..accc908bdc88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1322,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) */ #if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) +{ + int32_t value; + + /* sanity check PP is enabled */ + if (!(adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->read_sensor)) + return -EINVAL; + + /* GPU Clocks */ + seq_printf(m, "GFX Clocks and Power:\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value)) + seq_printf(m, "\t%u MHz (MCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value)) + seq_printf(m, "\t%u MHz (SCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value)) + seq_printf(m, "\t%u mV (VDDGFX)\n", value); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value)) + seq_printf(m, "\t%u mV (VDDNB)\n", value); + seq_printf(m, "\n"); + + /* GPU Temp */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value)) + seq_printf(m, "GPU Temperature: %u C\n", value/1000); + + /* GPU Load */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value)) + seq_printf(m, "GPU Load: %u %%\n", value); + seq_printf(m, "\n"); + + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } 
else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); + + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value)) + seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } + } + + return 0; +} + static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1337,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { seq_printf(m, "PX asic powered off\n"); } else if (adev->pp_enabled) { - amdgpu_dpm_debugfs_print_current_performance_level(adev, m); + return amdgpu_debugfs_pm_info_pp(m, adev); } else { mutex_lock(&adev->pm.mutex); if (adev->pm.funcs->debugfs_print_current_performance_level) - amdgpu_dpm_debugfs_print_current_performance_level(adev, m); + adev->pm.funcs->debugfs_print_current_performance_level(adev, m); else seq_printf(m, "Debugfs support not implemented for this asic\n"); mutex_unlock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index ee0368381e82..212ec2fd97ed 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -576,28 +576,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) } } -static void -pp_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct pp_hwmgr *hwmgr; - - if (handle == NULL) - return; - - hwmgr = ((struct pp_instance *)handle)->hwmgr; - - if (hwmgr == NULL || hwmgr->hwmgr_func == NULL) - return; - - if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return; - } - - hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m); -} - static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr; @@ -925,7 +903,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .powergate_vce = pp_dpm_powergate_vce, .powergate_uvd = pp_dpm_powergate_uvd, .dispatch_tasks = pp_dpm_dispatch_tasks, - .print_current_performance_level = pp_debugfs_print_current_performance_level, .set_fan_control_mode = pp_dpm_set_fan_control_mode, .get_fan_control_mode = pp_dpm_get_fan_control_mode, .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 9f3c5a8a903c..7e4fcbbbe086 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1538,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr) return sizeof(struct cz_power_state); } -static void -cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - - struct phm_clock_voltage_dependency_table *table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - - struct phm_vce_clock_voltage_dependency_table *vce_table = - hwmgr->dyn_state.vce_clock_voltage_dependency_table; - - struct 
phm_uvd_clock_voltage_dependency_table *uvd_table = - hwmgr->dyn_state.uvd_clock_voltage_dependency_table; - - uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX), - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); - uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); - uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); - - uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent; - uint16_t vddnb, vddgfx; - int result; - - if (sclk_index >= NUM_SCLK_LEVELS) { - seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index); - } else { - sclk = table->entries[sclk_index].clk; - seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100); - } - - tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & - CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; - vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); - tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & - CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; - vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); - seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx); - - seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en"); - if (!cz_hwmgr->uvd_power_gated) { - if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index); - } else { - vclk = uvd_table->entries[uvd_index].vclk; - dclk = uvd_table->entries[uvd_index].dclk; - seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100); - } - } - - seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en"); - if (!cz_hwmgr->vce_power_gated) { - if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "\n invalid vce dpm level %d\n", vce_index); - } else { - ecclk = vce_table->entries[vce_index].ecclk; - seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100); - } - } - - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); - if (0 == result) { - activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); - activity_percent = activity_percent > 100 ? 100 : activity_percent; - } else { - activity_percent = 50; - } - - seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent); -} - static void cz_hw_print_display_cfg( const struct cc6_settings *cc6_settings) { @@ -1947,6 +1875,12 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) } *value = activity_percent; return 0; + case AMDGPU_PP_SENSOR_UVD_POWER: + *value = cz_hwmgr->uvd_power_gated ? 0 : 1; + return 0; + case AMDGPU_PP_SENSOR_VCE_POWER: + *value = cz_hwmgr->vce_power_gated ? 
0 : 1; + return 0; default: return -EINVAL; } @@ -1967,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .patch_boot_state = cz_dpm_patch_boot_state, .get_pp_table_entry = cz_dpm_get_pp_table_entry, .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, - .print_current_perforce_level = cz_print_current_perforce_level, .set_cpu_power_state = cz_set_cpu_power_state, .store_cc6_data = cz_store_cc6_data, .force_clock_level = cz_force_clock_level, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 07a7d046d6f6..a3832f2d893b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3112,38 +3112,6 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, return 0; } -static void -smu7_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent; - uint32_t offset; - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", - mclk / 100, sclk / 100); - - offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, - SMU_SoftRegisters, - AverageGraphicsActivity); - - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en"); -} - static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) { uint32_t sclk, mclk, activity_percent; @@ -3174,6 +3142,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) case AMDGPU_PP_SENSOR_GPU_TEMP: *value = smu7_thermal_get_temperature(hwmgr); return 0; + case AMDGPU_PP_SENSOR_UVD_POWER: + *value = data->uvd_power_gated ? 0 : 1; + return 0; + case AMDGPU_PP_SENSOR_VCE_POWER: + *value = data->vce_power_gated ? 
0 : 1; + return 0; default: return -EINVAL; } @@ -4318,7 +4292,6 @@ static struct pp_hwmgr_func smu7_hwmgr_funcs = { .patch_boot_state = smu7_dpm_patch_boot_state, .get_pp_table_entry = smu7_get_pp_table_entry, .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, - .print_current_perforce_level = smu7_print_current_perforce_level, .powerdown_uvd = smu7_powerdown_uvd, .powergate_uvd = smu7_powergate_uvd, .powergate_vce = smu7_powergate_vce, diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index dfa0f38a5e76..3d74043c0e08 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -39,6 +39,8 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_GPU_LOAD, AMDGPU_PP_SENSOR_GFX_MCLK, AMDGPU_PP_SENSOR_GPU_TEMP, + AMDGPU_PP_SENSOR_VCE_POWER, + AMDGPU_PP_SENSOR_UVD_POWER, }; enum amd_pp_event { @@ -343,8 +345,6 @@ struct amd_powerplay_funcs { int (*powergate_uvd)(void *handle, bool gate); int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, void *input, void *output); - void (*print_current_performance_level)(void *handle, - struct seq_file *m); int (*set_fan_control_mode)(void *handle, uint32_t mode); int (*get_fan_control_mode)(void *handle); int (*set_fan_speed_percent)(void *handle, uint32_t percent); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index fcd45452380d..4f0fedd1e9d3 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -311,8 +311,6 @@ struct pp_hwmgr_func { int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*power_state_set)(struct pp_hwmgr *hwmgr, const void *state); - void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr, - struct seq_file *m); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); int (*display_config_changed)(struct pp_hwmgr *hwmgr); From fa675329fc79dc2f657811b26f17e7bd6f1b9837 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Sun, 18 Sep 2016 22:13:19 +0800 Subject: [PATCH 34/49] drm/amdgpu: remove unused functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We get 2 warnings when building kernel with W=1: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c:146:5: warning: no previous prototype for 'pool_to_domain' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/cz_smc.c:104:5: warning: no previous prototype for 'cz_send_msg_to_smc_with_parameter_async' [-Wmissing-prototypes] In fact, both functions are called by no one and not exported, so this patch removes them. 
Reviewed-by: Christian König Signed-off-by: Baoyou Xie Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 -------- drivers/gpu/drm/amd/amdgpu/cz_smc.c | 7 ------- 2 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index d080d0807a5b..dba8a5b25e66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev) return r; } -u32 pool_to_domain(enum kgd_memory_pool p) -{ - switch (p) { - case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM; - default: return AMDGPU_GEM_DOMAIN_GTT; - } -} - int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr) diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c index 95887e484c51..aed7033c0973 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c @@ -101,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg) return 0; } -int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev, - u16 msg, u32 parameter) -{ - WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc_async(adev, msg); -} - int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, u16 msg, u32 parameter) { From 4d446656fabb308ae2171e95a624a0367dcd34d2 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Sun, 18 Sep 2016 22:09:35 +0800 Subject: [PATCH 35/49] drm/amdgpu: mark symbols static where possible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We get 7 warnings when building kernel with W=1: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:1990:5: warning: no previous prototype for 'amdgpu_pre_soft_reset' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c:1548:5: warning: no previous prototype for 'amdgpu_connector_virtual_dpms' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c:1560:5: warning: no previous prototype for 'amdgpu_connector_virtual_set_property' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c:330:5: warning: no previous prototype for 'amdgpu_cs_list_validate' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/dce_virtual.c:98:6: warning: no previous prototype for 'dce_virtual_stop_mc_access' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/dce_virtual.c:130:6: warning: no previous prototype for 'dce_virtual_resume_mc_access' [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/dce_virtual.c:136:6: warning: no previous prototype for 'dce_virtual_set_vga_render_state' [-Wmissing-prototypes] In fact, all of these functions are only used in the file in which they are defined, so they don't need a declaration and can be made static. So this patch marks these functions 'static'. 
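As a companion sketch (again illustrative, not driver code), the pattern below shows the fix this patch applies: when a helper is still used within its own file, giving it internal linkage with 'static' satisfies -Wmissing-prototypes, because only externally visible definitions are expected to carry a prior prototype. This mirrors what the patch does for the amdgpu_connector_virtual_* and dce_virtual_* helpers listed in the warnings:

/* demo_static.c -- standalone illustration, not part of the patch.
 * Build with: gcc -c -Wall -Wextra -Wmissing-prototypes demo_static.c */

/* Needed inside this file but nowhere else: 'static' removes the
 * external linkage, so no prior prototype is expected and the W=1
 * warning goes away. */
static int file_local_helper(int x)
{
	return x + 1;
}

int public_entry(void);		/* would normally live in a header */

int public_entry(void)
{
	return file_local_helper(41);
}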
Reviewed-by: Christian König Signed-off-by: Baoyou Xie Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 6 +++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 319a5e1d9389..decbba5ad438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1545,7 +1545,8 @@ static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, return MODE_OK; } -int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) +static int +amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) { return 0; } @@ -1557,7 +1558,8 @@ amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -int amdgpu_connector_virtual_set_property(struct drm_connector *connector, +static int +amdgpu_connector_virtual_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 75f490f9bd8d..99a15cad6789 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2078,7 +2078,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) return asic_hang; } -int amdgpu_pre_soft_reset(struct amdgpu_device *adev) +static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) { int i, r = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 00663a7b4053..2d02acd55829 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -95,7 +95,7 @@ static bool dce_virtual_is_display_hung(struct amdgpu_device *adev) return false; } -void dce_virtual_stop_mc_access(struct amdgpu_device *adev, +static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { switch (adev->asic_type) { @@ -127,13 +127,13 @@ void dce_virtual_stop_mc_access(struct amdgpu_device *adev, return; } -void dce_virtual_resume_mc_access(struct amdgpu_device *adev, +static void dce_virtual_resume_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { return; } -void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, +static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, bool render) { return; From 664a08bb906bd946a4c3963d5f92518e5ff19bd8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 20 Sep 2016 16:49:31 +0100 Subject: [PATCH 36/49] drm/amdgpu: avoid out of bounds access on array interrupt_status_offsets The check for an out of bounds index into the interrupt_status_offsets array is off by one. Fix this, and also don't compare against a hard-coded array size but use adev->mode_info.num_hpd instead. 
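A short standalone sketch (hypothetical table contents; the real code indexes interrupt_status_offsets by entry->src_data) illustrates why the old hard-coded 'src_data > 6' test is one element too permissive for a six-entry table, and why comparing against the element count, as the patch does with adev->mode_info.num_hpd, closes the hole:

/* demo_bounds.c -- standalone illustration, not part of the patch.
 * Build and run: gcc -Wall -Wextra demo_bounds.c && ./a.out */
#include <stdio.h>

#define NUM_ENTRIES 6	/* stands in for adev->mode_info.num_hpd */

static const int offsets[NUM_ENTRIES] = { 10, 11, 12, 13, 14, 15 };

static int lookup(unsigned int idx)
{
	/* Buggy form: 'if (idx > 6)' rejects 7 and above but still lets
	 * idx == 6 through, one element past the end of a 6-entry table.
	 * Comparing against the element count itself is always safe. */
	if (idx >= NUM_ENTRIES)
		return -1;
	return offsets[idx];
}

int main(void)
{
	printf("%d %d\n", lookup(5), lookup(6));	/* prints "15 -1" */
	return 0;
}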
Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index ec5d8d93ad23..c8380f030400 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2782,7 +2782,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, uint32_t disp_int, mask, int_control, tmp; unsigned hpd; - if (entry->src_data > 6) { + if (entry->src_data >= adev->mode_info.num_hpd) { DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); return 0; } From 465f96e21365c3df7b992692393834e984db306c Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 18 Sep 2016 16:52:03 +0800 Subject: [PATCH 37/49] drm/amd/powerplay: export function to help to set cg by smu. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 77 +------------------ .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 2 + 2 files changed, 5 insertions(+), 74 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 212ec2fd97ed..7174f7a68266 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -191,11 +191,9 @@ static int pp_sw_reset(void *handle) } -static int pp_set_clockgating_state(void *handle, - enum amd_clockgating_state state) +int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id) { struct pp_hwmgr *hwmgr; - uint32_t msg_id, pp_state; if (handle == NULL) return -EINVAL; @@ -209,76 +207,7 @@ static int pp_set_clockgating_state(void *handle, return 0; } - if (state == AMD_CG_STATE_UNGATE) - pp_state = 0; - else - pp_state = PP_STATE_CG | PP_STATE_LS; - - /* Enable/disable GFX blocks clock gating through SMU */ - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_CG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_3D, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_RLC, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_CP, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_MG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - - /* Enable/disable System blocks clock gating through SMU */ - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_BIF, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_BIF, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_MC, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_ROM, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, 
&msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_DRM, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_HDP, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_SDMA, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - - return 0; + return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); } static int pp_set_powergating_state(void *handle, @@ -362,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = { .is_idle = pp_is_idle, .wait_for_idle = pp_wait_for_idle, .soft_reset = pp_sw_reset, - .set_clockgating_state = pp_set_clockgating_state, + .set_clockgating_state = NULL, .set_powergating_state = pp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 3d74043c0e08..3fb5e57a378b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -390,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle, int amd_powerplay_get_display_mode_validation_clocks(void *handle, struct amd_pp_simple_clock_info *output); +int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id); + #endif /* _AMD_POWERPLAY_H_ */ From 1bb08f91b0f6b2dd24a1a5bf42875258647ee285 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 18 Sep 2016 16:54:00 +0800 Subject: [PATCH 38/49] drm/amdgpu: set system clock gating for tonga/polaris. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 61 +++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index a8154d0ac288..63ce7f08d1d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1807,6 +1807,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); } +static int vi_common_set_clockgating_state_by_smu(void *handle, + enum amd_clockgating_state state) +{ + uint32_t msg_id, pp_state; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + void *pp_handle = adev->powerplay.pp_handle; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG | PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_MC, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_SDMA, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_HDP, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_BIF, + PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_BIF, + PP_STATE_SUPPORT_CG, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_DRM, + PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + 
PP_BLOCK_SYS_ROM, + PP_STATE_SUPPORT_CG, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + return 0; +} + static int vi_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1832,6 +1889,10 @@ static int vi_common_set_clockgating_state(void *handle, vi_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); break; + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + vi_common_set_clockgating_state_by_smu(adev, state); default: break; } From a8ca34136453b5a570b514e466f8b0b9efd71df2 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 18 Sep 2016 16:55:00 +0800 Subject: [PATCH 39/49] drm/amdgpu: set gfx clock gating for tonga/polaris. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 77 +++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6da7d94925eb..f490691d4b6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5979,6 +5979,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + uint32_t msg_id, pp_state; + void *pp_handle = adev->powerplay.pp_handle; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG | PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_MG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + return 0; +} + +static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + uint32_t msg_id, pp_state; + void *pp_handle = adev->powerplay.pp_handle; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG | PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_3D, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_MG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_RLC, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CP, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + return 0; +} + static int gfx_v8_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -5991,6 +6061,13 @@ static int gfx_v8_0_set_clockgating_state(void *handle, gfx_v8_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; + case CHIP_TONGA: + gfx_v8_0_tonga_update_gfx_clock_gating(adev, state); + break; + case CHIP_POLARIS10: + case CHIP_POLARIS11: + gfx_v8_0_polaris_update_gfx_clock_gating(adev, state); + break; default: break; } From 9487dd1548d600161485ee5528cec36ccff96edd Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 19 Sep 2016 15:44:50 +0800 Subject: [PATCH 40/49] drm/amdgpu: use powerplay module for dgpu in Vi. delete non-pp code and files. It was just a temporary solution and not support dynamic power management. Signed-off-by: Rex Zhu Acked-by: Edward O'Callaghan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 13 +- drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 186 ---- drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 863 ------------------ drivers/gpu/drm/amd/amdgpu/fiji_smum.h | 42 - drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 200 ---- drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 677 -------------- drivers/gpu/drm/amd/amdgpu/iceland_smum.h | 41 - drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 186 ---- drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 862 ----------------- drivers/gpu/drm/amd/amdgpu/tonga_smum.h | 42 - drivers/gpu/drm/amd/amdgpu/vi.c | 3 + 12 files changed, 6 insertions(+), 3114 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_dpm.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_smc.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_smum.h delete mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_dpm.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_smc.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_smum.h delete mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_dpm.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_smc.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_smum.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index dc6df075bafc..d15e9b080ce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -52,10 +52,7 @@ amdgpu-y += \ amdgpu-y += \ amdgpu_dpm.o \ amdgpu_powerplay.o \ - cz_smc.o cz_dpm.o \ - tonga_smc.o tonga_dpm.o \ - fiji_smc.o fiji_dpm.o \ - iceland_smc.o iceland_dpm.o + cz_smc.o cz_dpm.o # add DCE block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 1e7f160f23d8..68ad24101a36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -80,15 +80,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) amd_pp->ip_funcs = &kv_dpm_ip_funcs; break; #endif - case CHIP_TOPAZ: - amd_pp->ip_funcs = &iceland_dpm_ip_funcs; - break; - case CHIP_TONGA: - amd_pp->ip_funcs = &tonga_dpm_ip_funcs; - break; - case CHIP_FIJI: - amd_pp->ip_funcs = &fiji_dpm_ip_funcs; - break; case CHIP_CARRIZO: case CHIP_STONEY: amd_pp->ip_funcs = &cz_dpm_ip_funcs; @@ -110,11 +101,11 @@ static int amdgpu_pp_early_init(void *handle) switch (adev->asic_type) { case CHIP_POLARIS11: case CHIP_POLARIS10: - adev->pp_enabled = true; - break; case CHIP_TONGA: case CHIP_FIJI: case CHIP_TOPAZ: + adev->pp_enabled = true; + break; case CHIP_CARRIZO: case CHIP_STONEY: adev->pp_enabled = (amdgpu_powerplay == 0) ? 
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c deleted file mode 100644 index ed03b75175d4..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include "drmP.h" -#include "amdgpu.h" -#include "fiji_smum.h" - -MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); - -static void fiji_dpm_set_funcs(struct amdgpu_device *adev); - -static int fiji_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - fiji_dpm_set_funcs(adev); - - return 0; -} - -static int fiji_dpm_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30] = "amdgpu/fiji_smc.bin"; - int err; - - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int fiji_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = fiji_dpm_init_microcode(adev); - if (ret) - return ret; - - return 0; -} - -static int fiji_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int fiji_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - ret = fiji_smu_init(adev); - if (ret) { - DRM_ERROR("SMU initialization failed\n"); - goto fail; - } - - ret = fiji_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - - mutex_unlock(&adev->pm.mutex); - return 0; - -fail: - adev->firmware.smu_load = false; - mutex_unlock(&adev->pm.mutex); - return -EINVAL; -} - -static int fiji_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - mutex_lock(&adev->pm.mutex); - fiji_smu_fini(adev); - mutex_unlock(&adev->pm.mutex); - return 0; -} - -static int fiji_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - fiji_dpm_hw_fini(adev); - - return 0; -} - -static int fiji_dpm_resume(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - fiji_dpm_hw_init(adev); - - 
return 0; -} - -static int fiji_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int fiji_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -const struct amd_ip_funcs fiji_dpm_ip_funcs = { - .name = "fiji_dpm", - .early_init = fiji_dpm_early_init, - .late_init = NULL, - .sw_init = fiji_dpm_sw_init, - .sw_fini = fiji_dpm_sw_fini, - .hw_init = fiji_dpm_hw_init, - .hw_fini = fiji_dpm_hw_fini, - .suspend = fiji_dpm_suspend, - .resume = fiji_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = fiji_dpm_set_clockgating_state, - .set_powergating_state = fiji_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs fiji_dpm_funcs = { - .get_temperature = NULL, - .pre_set_power_state = NULL, - .set_power_state = NULL, - .post_set_power_state = NULL, - .display_configuration_changed = NULL, - .get_sclk = NULL, - .get_mclk = NULL, - .print_power_state = NULL, - .debugfs_print_current_performance_level = NULL, - .force_performance_level = NULL, - .vblank_too_short = NULL, - .powergate_uvd = NULL, -}; - -static void fiji_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &fiji_dpm_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c deleted file mode 100644 index 8cfb0a3cf725..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ /dev/null @@ -1,863 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include -#include "drmP.h" -#include "amdgpu.h" -#include "fiji_ppsmc.h" -#include "fiji_smum.h" -#include "smu_ucode_xfer_vi.h" -#include "amdgpu_ucode.h" - -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" - -#define FIJI_SMC_SIZE 0x20000 - -static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) -{ - uint32_t val; - - if (smc_address & 3) - return -EINVAL; - - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - return 0; -} - -static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - unsigned long flags; - - if (smc_start_address & 3) - return -EINVAL; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = fiji_set_smc_sram_address(adev, addr, limit); - - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = fiji_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - orig_data = RREG32(mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = fiji_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - } - -out: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int fiji_program_jump_on_start(struct amdgpu_device *adev) -{ - static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; - fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -static bool fiji_is_smc_ram_running(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - - return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); -} - -static int wait_smu_response(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32(mmSMC_RESP_0); - if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} - -static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, 0x20000); - WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - if (!fiji_is_smc_ram_running(adev)) - { - return -EINVAL; - } - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - 
WREG32(mmSMC_MESSAGE_0, msg); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - return 0; -} - -static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, - uint32_t parameter) -{ - if (!fiji_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return fiji_send_msg_to_smc(adev, msg); -} - -static int fiji_send_msg_to_smc_with_parameter_without_waiting( - struct amdgpu_device *adev, - PPSMC_Msg msg, uint32_t parameter) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return fiji_send_msg_to_smc_without_waiting(adev, msg); -} - -#if 0 /* not used yet */ -static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - if (!fiji_is_smc_ram_running(adev)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} -#endif - -static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) -{ - const struct smc_firmware_header_v1_0 *hdr; - uint32_t ucode_size; - uint32_t ucode_start_address; - const uint8_t *src; - uint32_t val; - uint32_t byte_count; - uint32_t *data; - unsigned long flags; - - if (!adev->pm.fw) - return -EINVAL; - - /* Skip SMC ucode loading on SR-IOV capable boards. - * vbios does this for us in asic_init in that case. 
- */ - if (amdgpu_sriov_bios(adev)) - return 0; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - src = (const uint8_t *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) { - DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (ucode_size > FIJI_SMC_SIZE) { - DRM_ERROR("SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - byte_count = ucode_size; - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - WREG32(mmSMC_IND_DATA_0, data[0]); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -#if 0 /* not used yet */ -static int fiji_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t *value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = fiji_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int fiji_write_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = fiji_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int fiji_smu_stop_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - return 0; -} -#endif - -static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case UCODE_ID_SDMA0: - return AMDGPU_UCODE_ID_SDMA0; - case UCODE_ID_SDMA1: - return AMDGPU_UCODE_ID_SDMA1; - case UCODE_ID_CP_CE: - return AMDGPU_UCODE_ID_CP_CE; - case UCODE_ID_CP_PFP: - return AMDGPU_UCODE_ID_CP_PFP; - case UCODE_ID_CP_ME: - return AMDGPU_UCODE_ID_CP_ME; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - return AMDGPU_UCODE_ID_CP_MEC1; - case UCODE_ID_RLC_G: - return AMDGPU_UCODE_ID_RLC_G; - default: - DRM_ERROR("ucode type is out of range!\n"); - return AMDGPU_UCODE_ID_MAXIMUM; - } -} - -static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type); - struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header = NULL; - uint64_t gpu_addr; - uint32_t data_size; - - if 
(ucode->fw == NULL) - return -EINVAL; - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - if ((fw_type == UCODE_ID_CP_MEC_JT1) || - (fw_type == UCODE_ID_CP_MEC_JT2)) { - gpu_addr += le32_to_cpu(header->jt_offset) << 2; - data_size = le32_to_cpu(header->jt_size) << 2; - } - - entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - entry->id = (uint16_t)fw_type; - entry->image_addr_high = upper_32_bits(gpu_addr); - entry->image_addr_low = lower_32_bits(gpu_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = data_size; - entry->num_register_entries = 0; - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - - return 0; -} - -static int fiji_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv; - struct SMU_DRAMData_TOC *toc; - uint32_t fw_to_load; - - WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); - - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); - - toc = (struct SMU_DRAMData_TOC *)private->header; - toc->num_entries = 0; - toc->structure_version = 1; - - if (!adev->firmware.smu_load) - return 0; - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for RLC\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for CE\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for PFP\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for ME\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA1\n"); - return -EINVAL; - } - - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK; - - if 
(fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); - return -EINVAL; - } - - return 0; -} - -static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case AMDGPU_UCODE_ID_SDMA0: - return UCODE_ID_SDMA0_MASK; - case AMDGPU_UCODE_ID_SDMA1: - return UCODE_ID_SDMA1_MASK; - case AMDGPU_UCODE_ID_CP_CE: - return UCODE_ID_CP_CE_MASK; - case AMDGPU_UCODE_ID_CP_PFP: - return UCODE_ID_CP_PFP_MASK; - case AMDGPU_UCODE_ID_CP_ME: - return UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: - return UCODE_ID_RLC_G_MASK; - default: - DRM_ERROR("ucode type is out of range!\n"); - return 0; - } -} - -static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_type) -{ - uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type); - int i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - int i; - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = fiji_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Clear status */ - WREG32_SMC(ixSMU_STATUS, 0); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Set SMU Auto Start */ - val = RREG32_SMC(ixSMU_INPUT_DATA); - val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); - WREG32_SMC(ixSMU_INPUT_DATA, val); - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Interrupt is not enabled by firmware\n"); - return -EINVAL; - } - - /* Call Test SMU message with 0x20000 offset - * to trigger SMU start - */ - fiji_send_msg_to_smc_offset(adev); - DRM_INFO("[FM]try triger smu start\n"); - /* Wait for done bit to be set */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMU_STATUS); - if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMU start\n"); - return -EINVAL; - } - - /* Check pass/failed indicator */ - val = RREG32_SMC(ixSMU_STATUS); - if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { - DRM_ERROR("SMU Firmware start failed\n"); - return -EINVAL; - } - DRM_INFO("[FM]smu started\n"); - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMU firmware initialization failed\n"); - 
return -EINVAL; - } - DRM_INFO("[FM]smu initialized\n"); - - return 0; -} - -static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev) -{ - int i, result; - uint32_t val; - - /* wait for smc boot up */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); - if (val) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMC boot sequence is not completed\n"); - return -EINVAL; - } - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = fiji_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Set smc instruct start point at 0x0 */ - fiji_program_jump_on_start(adev); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMC firmware initialization\n"); - return -EINVAL; - } - - return 0; -} - -int fiji_smu_start(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - - if (!fiji_is_smc_ram_running(adev)) { - val = RREG32_SMC(ixSMU_FIRMWARE); - if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { - DRM_INFO("[FM]start smu in nonprotection mode\n"); - result = fiji_smu_start_in_non_protection_mode(adev); - if (result) - return result; - } else { - DRM_INFO("[FM]start smu in protection mode\n"); - result = fiji_smu_start_in_protection_mode(adev); - if (result) - return result; - } - } - - return fiji_smu_request_load_fw(adev); -} - -static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = { - .check_fw_load_finish = fiji_smu_check_fw_load_finish, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -int fiji_smu_init(struct amdgpu_device *adev) -{ - struct fiji_smu_private_data *private; - uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - uint32_t smu_internal_buffer_size = 200*4096; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; - uint64_t mc_addr; - void *toc_buf_ptr; - void *smu_buf_ptr; - int ret; - - private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL); - if (NULL == private) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = private; - adev->smu.fw_flags = 0; - - /* Allocate FW image data structure and header buffer */ - ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, toc_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for TOC buffer\n"); - return -ENOMEM; - } - - /* Allocate buffer for SMU internal buffer */ - ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, 
smu_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); - return -ENOMEM; - } - - /* Retrieve GPU address for header buffer and internal buffer */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the TOC buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.toc_buf); - private->header_addr_low = lower_32_bits(mc_addr); - private->header_addr_high = upper_32_bits(mc_addr); - private->header = toc_buf_ptr; - - ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the SMU internal buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.smu_buf); - private->smu_buffer_addr_low = lower_32_bits(mc_addr); - private->smu_buffer_addr_high = upper_32_bits(mc_addr); - - adev->smu.smumgr_funcs = &fiji_smumgr_funcs; - - return 0; -} - -int fiji_smu_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.fw_buf) - amdgpu_ucode_fini_bo(adev); - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/amdgpu/fiji_smum.h deleted file mode 100644 index 1cef03deeac3..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef FIJI_SMUMGR_H -#define FIJI_SMUMGR_H - -#include "fiji_ppsmc.h" - -int fiji_smu_init(struct amdgpu_device *adev); -int fiji_smu_fini(struct amdgpu_device *adev); -int fiji_smu_start(struct amdgpu_device *adev); - -struct fiji_smu_private_data -{ - uint8_t *header; - uint32_t smu_buffer_addr_high; - uint32_t smu_buffer_addr_low; - uint32_t header_addr_high; - uint32_t header_addr_low; -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c deleted file mode 100644 index 2f078ad6095c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include "drmP.h" -#include "amdgpu.h" -#include "iceland_smum.h" - -MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); - -static void iceland_dpm_set_funcs(struct amdgpu_device *adev); - -static int iceland_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - iceland_dpm_set_funcs(adev); - - return 0; -} - -static int iceland_dpm_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30] = "amdgpu/topaz_smc.bin"; - int err; - - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int iceland_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = iceland_dpm_init_microcode(adev); - if (ret) - return ret; - - return 0; -} - -static int iceland_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int iceland_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* smu init only needs to be called at startup, not resume. 
- * It should be in sw_init, but requires the fw info gathered - * in sw_init from other IP modules. - */ - ret = iceland_smu_init(adev); - if (ret) { - DRM_ERROR("SMU initialization failed\n"); - goto fail; - } - - ret = iceland_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - - mutex_unlock(&adev->pm.mutex); - return 0; - -fail: - adev->firmware.smu_load = false; - mutex_unlock(&adev->pm.mutex); - return -EINVAL; -} - -static int iceland_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - /* smu fini only needs to be called at teardown, not suspend. - * It should be in sw_fini, but we put it here for symmetry - * with smu init. - */ - iceland_smu_fini(adev); - mutex_unlock(&adev->pm.mutex); - return 0; -} - -static int iceland_dpm_suspend(void *handle) -{ - return 0; -} - -static int iceland_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - ret = iceland_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - -fail: - mutex_unlock(&adev->pm.mutex); - return ret; -} - -static int iceland_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int iceland_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -const struct amd_ip_funcs iceland_dpm_ip_funcs = { - .name = "iceland_dpm", - .early_init = iceland_dpm_early_init, - .late_init = NULL, - .sw_init = iceland_dpm_sw_init, - .sw_fini = iceland_dpm_sw_fini, - .hw_init = iceland_dpm_hw_init, - .hw_fini = iceland_dpm_hw_fini, - .suspend = iceland_dpm_suspend, - .resume = iceland_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = iceland_dpm_set_clockgating_state, - .set_powergating_state = iceland_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs iceland_dpm_funcs = { - .get_temperature = NULL, - .pre_set_power_state = NULL, - .set_power_state = NULL, - .post_set_power_state = NULL, - .display_configuration_changed = NULL, - .get_sclk = NULL, - .get_mclk = NULL, - .print_power_state = NULL, - .debugfs_print_current_performance_level = NULL, - .force_performance_level = NULL, - .vblank_too_short = NULL, - .powergate_uvd = NULL, -}; - -static void iceland_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &iceland_dpm_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c deleted file mode 100644 index c6e004a3f557..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ /dev/null @@ -1,677 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include "drmP.h" -#include "amdgpu.h" -#include "ppsmc.h" -#include "iceland_smum.h" -#include "smu_ucode_xfer_vi.h" -#include "amdgpu_ucode.h" - -#include "smu/smu_7_1_1_d.h" -#include "smu/smu_7_1_1_sh_mask.h" - -#define ICELAND_SMC_SIZE 0x20000 - -static int iceland_set_smc_sram_address(struct amdgpu_device *adev, - uint32_t smc_address, uint32_t limit) -{ - uint32_t val; - - if (smc_address & 3) - return -EINVAL; - - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - return 0; -} - -static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev, - uint32_t smc_start_address, - const uint8_t *src, - uint32_t byte_count, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - unsigned long flags; - - if (smc_start_address & 3) - return -EINVAL; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = iceland_set_smc_sram_address(adev, addr, limit); - - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = iceland_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - orig_data = RREG32(mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = iceland_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - } - -out: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static void iceland_start_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); -} - -static void iceland_reset_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); -} - -static int iceland_program_jump_on_start(struct amdgpu_device *adev) -{ - static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; - iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -static void iceland_stop_smc_clock(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); - 
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); -} - -static void iceland_start_smc_clock(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); -} - -static bool iceland_is_smc_ram_running(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - - return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); -} - -static int wait_smu_response(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32(mmSMC_RESP_0); - if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} - -static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - if (!iceland_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - if (!iceland_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - return 0; -} - -static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, - uint32_t parameter) -{ - WREG32(mmSMC_MSG_ARG_0, parameter); - - return iceland_send_msg_to_smc(adev, msg); -} - -static int iceland_send_msg_to_smc_with_parameter_without_waiting( - struct amdgpu_device *adev, - PPSMC_Msg msg, uint32_t parameter) -{ - WREG32(mmSMC_MSG_ARG_0, parameter); - - return iceland_send_msg_to_smc_without_waiting(adev, msg); -} - -#if 0 /* not used yet */ -static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - if (!iceland_is_smc_ram_running(adev)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} -#endif - -static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) -{ - const struct smc_firmware_header_v1_0 *hdr; - uint32_t ucode_size; - uint32_t ucode_start_address; - const uint8_t *src; - uint32_t val; - uint32_t byte_count; - uint32_t data; - unsigned long flags; - int i; - - if (!adev->pm.fw) - return -EINVAL; - - /* Skip SMC ucode loading on SR-IOV capable boards. - * vbios does this for us in asic_init in that case. 
- */ - if (amdgpu_sriov_bios(adev)) - return 0; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - src = (const uint8_t *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) { - DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (ucode_size > ICELAND_SMC_SIZE) { - DRM_ERROR("SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0) - break; - udelay(1); - } - val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL); - WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1); - - iceland_stop_smc_clock(adev); - iceland_reset_smc(adev); - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - byte_count = ucode_size; - while (byte_count >= 4) { - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - WREG32(mmSMC_IND_DATA_0, data); - src += 4; - byte_count -= 4; - } - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -#if 0 /* not used yet */ -static int iceland_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t *value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = iceland_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int iceland_write_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = iceland_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int iceland_smu_stop_smc(struct amdgpu_device *adev) -{ - iceland_reset_smc(adev); - iceland_stop_smc_clock(adev); - - return 0; -} -#endif - -static int iceland_smu_start_smc(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - iceland_program_jump_on_start(adev); - iceland_start_smc_clock(adev); - iceland_start_smc(adev); - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1) - break; - udelay(1); - } - return 0; -} - -static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case UCODE_ID_SDMA0: - return AMDGPU_UCODE_ID_SDMA0; - case UCODE_ID_SDMA1: - return AMDGPU_UCODE_ID_SDMA1; - case UCODE_ID_CP_CE: - return AMDGPU_UCODE_ID_CP_CE; - case UCODE_ID_CP_PFP: - return AMDGPU_UCODE_ID_CP_PFP; - case UCODE_ID_CP_ME: - return AMDGPU_UCODE_ID_CP_ME; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - return AMDGPU_UCODE_ID_CP_MEC1; - case UCODE_ID_CP_MEC_JT2: - return 
AMDGPU_UCODE_ID_CP_MEC2; - case UCODE_ID_RLC_G: - return AMDGPU_UCODE_ID_RLC_G; - default: - DRM_ERROR("ucode type is out of range!\n"); - return AMDGPU_UCODE_ID_MAXIMUM; - } -} - -static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case AMDGPU_UCODE_ID_SDMA0: - return UCODE_ID_SDMA0_MASK; - case AMDGPU_UCODE_ID_SDMA1: - return UCODE_ID_SDMA1_MASK; - case AMDGPU_UCODE_ID_CP_CE: - return UCODE_ID_CP_CE_MASK; - case AMDGPU_UCODE_ID_CP_PFP: - return UCODE_ID_CP_PFP_MASK; - case AMDGPU_UCODE_ID_CP_ME: - return UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: - return UCODE_ID_RLC_G_MASK; - default: - DRM_ERROR("ucode type is out of range!\n"); - return 0; - } -} - -static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type); - struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header = NULL; - uint64_t gpu_addr; - uint32_t data_size; - - if (ucode->fw == NULL) - return -EINVAL; - - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - entry->id = (uint16_t)fw_type; - entry->image_addr_high = upper_32_bits(gpu_addr); - entry->image_addr_low = lower_32_bits(gpu_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = data_size; - entry->num_register_entries = 0; - entry->flags = 0; - - return 0; -} - -static int iceland_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv; - struct SMU_DRAMData_TOC *toc; - uint32_t fw_to_load; - - toc = (struct SMU_DRAMData_TOC *)private->header; - toc->num_entries = 0; - toc->structure_version = 1; - - if (!adev->firmware.smu_load) - return 0; - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for RLC\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for CE\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for PFP\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for ME\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); - return -EINVAL; - } - - if 
(iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA1\n"); - return -EINVAL; - } - - iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); - iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK | - UCODE_ID_CP_MEC_JT1_MASK; - - - if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); - return -EINVAL; - } - - return 0; -} - -static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_type) -{ - uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type); - int i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -int iceland_smu_start(struct amdgpu_device *adev) -{ - int result; - - result = iceland_smu_upload_firmware_image(adev); - if (result) - return result; - result = iceland_smu_start_smc(adev); - if (result) - return result; - - return iceland_smu_request_load_fw(adev); -} - -static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = { - .check_fw_load_finish = iceland_smu_check_fw_load_finish, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -int iceland_smu_init(struct amdgpu_device *adev) -{ - struct iceland_smu_private_data *private; - uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - uint64_t mc_addr; - void *toc_buf_ptr; - int ret; - - private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL); - if (NULL == private) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = private; - adev->smu.fw_flags = 0; - - /* Allocate FW image data structure and header buffer */ - ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, toc_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for TOC buffer\n"); - return -ENOMEM; - } - - /* Retrieve GPU address for header buffer and internal buffer */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the TOC buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.toc_buf); - private->header_addr_low = lower_32_bits(mc_addr); - private->header_addr_high = upper_32_bits(mc_addr); - private->header = toc_buf_ptr; - - adev->smu.smumgr_funcs = &iceland_smumgr_funcs; - - return 0; -} - -int iceland_smu_fini(struct 
amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.fw_buf) - amdgpu_ucode_fini_bo(adev); - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h deleted file mode 100644 index 5983e3150cc5..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef ICELAND_SMUM_H -#define ICELAND_SMUM_H - -#include "ppsmc.h" - -extern int iceland_smu_init(struct amdgpu_device *adev); -extern int iceland_smu_fini(struct amdgpu_device *adev); -extern int iceland_smu_start(struct amdgpu_device *adev); - -struct iceland_smu_private_data -{ - uint8_t *header; - uint8_t *mec_image; - uint32_t header_addr_high; - uint32_t header_addr_low; -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c deleted file mode 100644 index f06f6f4dc3a8..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include -#include "drmP.h" -#include "amdgpu.h" -#include "tonga_smum.h" - -MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); - -static void tonga_dpm_set_funcs(struct amdgpu_device *adev); - -static int tonga_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - tonga_dpm_set_funcs(adev); - - return 0; -} - -static int tonga_dpm_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30] = "amdgpu/tonga_smc.bin"; - int err; - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int tonga_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = tonga_dpm_init_microcode(adev); - if (ret) - return ret; - - return 0; -} - -static int tonga_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int tonga_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* smu init only needs to be called at startup, not resume. - * It should be in sw_init, but requires the fw info gathered - * in sw_init from other IP modules. - */ - ret = tonga_smu_init(adev); - if (ret) { - DRM_ERROR("SMU initialization failed\n"); - goto fail; - } - - ret = tonga_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - - mutex_unlock(&adev->pm.mutex); - return 0; - -fail: - adev->firmware.smu_load = false; - mutex_unlock(&adev->pm.mutex); - return -EINVAL; -} - -static int tonga_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - /* smu fini only needs to be called at teardown, not suspend. - * It should be in sw_fini, but we put it here for symmetry - * with smu init. 
- */ - tonga_smu_fini(adev); - mutex_unlock(&adev->pm.mutex); - return 0; -} - -static int tonga_dpm_suspend(void *handle) -{ - return tonga_dpm_hw_fini(handle); -} - -static int tonga_dpm_resume(void *handle) -{ - return tonga_dpm_hw_init(handle); -} - -static int tonga_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int tonga_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -const struct amd_ip_funcs tonga_dpm_ip_funcs = { - .name = "tonga_dpm", - .early_init = tonga_dpm_early_init, - .late_init = NULL, - .sw_init = tonga_dpm_sw_init, - .sw_fini = tonga_dpm_sw_fini, - .hw_init = tonga_dpm_hw_init, - .hw_fini = tonga_dpm_hw_fini, - .suspend = tonga_dpm_suspend, - .resume = tonga_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = tonga_dpm_set_clockgating_state, - .set_powergating_state = tonga_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs tonga_dpm_funcs = { - .get_temperature = NULL, - .pre_set_power_state = NULL, - .set_power_state = NULL, - .post_set_power_state = NULL, - .display_configuration_changed = NULL, - .get_sclk = NULL, - .get_mclk = NULL, - .print_power_state = NULL, - .debugfs_print_current_performance_level = NULL, - .force_performance_level = NULL, - .vblank_too_short = NULL, - .powergate_uvd = NULL, -}; - -static void tonga_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &tonga_dpm_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c deleted file mode 100644 index 1e71e819468b..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ /dev/null @@ -1,862 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include -#include "drmP.h" -#include "amdgpu.h" -#include "tonga_ppsmc.h" -#include "tonga_smum.h" -#include "smu_ucode_xfer_vi.h" -#include "amdgpu_ucode.h" - -#include "smu/smu_7_1_2_d.h" -#include "smu/smu_7_1_2_sh_mask.h" - -#define TONGA_SMC_SIZE 0x20000 - -static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) -{ - uint32_t val; - - if (smc_address & 3) - return -EINVAL; - - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - return 0; -} - -static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - unsigned long flags; - - if (smc_start_address & 3) - return -EINVAL; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = tonga_set_smc_sram_address(adev, addr, limit); - - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = tonga_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - orig_data = RREG32(mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = tonga_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - } - -out: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int tonga_program_jump_on_start(struct amdgpu_device *adev) -{ - static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; - tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -static bool tonga_is_smc_ram_running(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - - return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); -} - -static int wait_smu_response(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32(mmSMC_RESP_0); - if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} - -static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, 0x20000); - WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - if (!tonga_is_smc_ram_running(adev)) - { - return -EINVAL; - } - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - 
WREG32(mmSMC_MESSAGE_0, msg); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - return 0; -} - -static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, - uint32_t parameter) -{ - if (!tonga_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc(adev, msg); -} - -static int tonga_send_msg_to_smc_with_parameter_without_waiting( - struct amdgpu_device *adev, - PPSMC_Msg msg, uint32_t parameter) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc_without_waiting(adev, msg); -} - -#if 0 /* not used yet */ -static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - if (!tonga_is_smc_ram_running(adev)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} -#endif - -static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev) -{ - const struct smc_firmware_header_v1_0 *hdr; - uint32_t ucode_size; - uint32_t ucode_start_address; - const uint8_t *src; - uint32_t val; - uint32_t byte_count; - uint32_t *data; - unsigned long flags; - - if (!adev->pm.fw) - return -EINVAL; - - /* Skip SMC ucode loading on SR-IOV capable boards. - * vbios does this for us in asic_init in that case. 
- */ - if (amdgpu_sriov_bios(adev)) - return 0; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - src = (const uint8_t *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) { - DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (ucode_size > TONGA_SMC_SIZE) { - DRM_ERROR("SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - byte_count = ucode_size; - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - WREG32(mmSMC_IND_DATA_0, data[0]); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -#if 0 /* not used yet */ -static int tonga_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t *value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = tonga_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int tonga_write_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = tonga_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int tonga_smu_stop_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - return 0; -} -#endif - -static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case UCODE_ID_SDMA0: - return AMDGPU_UCODE_ID_SDMA0; - case UCODE_ID_SDMA1: - return AMDGPU_UCODE_ID_SDMA1; - case UCODE_ID_CP_CE: - return AMDGPU_UCODE_ID_CP_CE; - case UCODE_ID_CP_PFP: - return AMDGPU_UCODE_ID_CP_PFP; - case UCODE_ID_CP_ME: - return AMDGPU_UCODE_ID_CP_ME; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - return AMDGPU_UCODE_ID_CP_MEC1; - case UCODE_ID_CP_MEC_JT2: - return AMDGPU_UCODE_ID_CP_MEC2; - case UCODE_ID_RLC_G: - return AMDGPU_UCODE_ID_RLC_G; - default: - DRM_ERROR("ucode type is out of range!\n"); - return AMDGPU_UCODE_ID_MAXIMUM; - } -} - -static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type); - struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header = NULL; - uint64_t 
gpu_addr; - uint32_t data_size; - - if (ucode->fw == NULL) - return -EINVAL; - - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - if ((fw_type == UCODE_ID_CP_MEC_JT1) || - (fw_type == UCODE_ID_CP_MEC_JT2)) { - gpu_addr += le32_to_cpu(header->jt_offset) << 2; - data_size = le32_to_cpu(header->jt_size) << 2; - } - - entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - entry->id = (uint16_t)fw_type; - entry->image_addr_high = upper_32_bits(gpu_addr); - entry->image_addr_low = lower_32_bits(gpu_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = data_size; - entry->num_register_entries = 0; - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - - return 0; -} - -static int tonga_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv; - struct SMU_DRAMData_TOC *toc; - uint32_t fw_to_load; - - WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); - - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); - - toc = (struct SMU_DRAMData_TOC *)private->header; - toc->num_entries = 0; - toc->structure_version = 1; - - if (!adev->firmware.smu_load) - return 0; - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for RLC\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for CE\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for PFP\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for ME\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA1\n"); - return -EINVAL; - } - - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - 
UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK; - - if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); - return -EINVAL; - } - - return 0; -} - -static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case AMDGPU_UCODE_ID_SDMA0: - return UCODE_ID_SDMA0_MASK; - case AMDGPU_UCODE_ID_SDMA1: - return UCODE_ID_SDMA1_MASK; - case AMDGPU_UCODE_ID_CP_CE: - return UCODE_ID_CP_CE_MASK; - case AMDGPU_UCODE_ID_CP_PFP: - return UCODE_ID_CP_PFP_MASK; - case AMDGPU_UCODE_ID_CP_ME: - return UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: - return UCODE_ID_RLC_G_MASK; - default: - DRM_ERROR("ucode type is out of range!\n"); - return 0; - } -} - -static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_type) -{ - uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type); - int i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - int i; - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = tonga_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Clear status */ - WREG32_SMC(ixSMU_STATUS, 0); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Set SMU Auto Start */ - val = RREG32_SMC(ixSMU_INPUT_DATA); - val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); - WREG32_SMC(ixSMU_INPUT_DATA, val); - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Interrupt is not enabled by firmware\n"); - return -EINVAL; - } - - /* Call Test SMU message with 0x20000 offset - * to trigger SMU start - */ - tonga_send_msg_to_smc_offset(adev); - - /* Wait for done bit to be set */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMU_STATUS); - if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMU start\n"); - return -EINVAL; - } - - /* Check pass/failed indicator */ - val = RREG32_SMC(ixSMU_STATUS); - if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { - DRM_ERROR("SMU Firmware start failed\n"); - return -EINVAL; - } - - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMU firmware initialization failed\n"); - return 
-EINVAL; - } - - return 0; -} - -static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev) -{ - int i, result; - uint32_t val; - - /* wait for smc boot up */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); - if (val) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMC boot sequence is not completed\n"); - return -EINVAL; - } - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = tonga_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Set smc instruct start point at 0x0 */ - tonga_program_jump_on_start(adev); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMC firmware initialization\n"); - return -EINVAL; - } - - return 0; -} - -int tonga_smu_start(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - - if (!tonga_is_smc_ram_running(adev)) { - val = RREG32_SMC(ixSMU_FIRMWARE); - if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { - result = tonga_smu_start_in_non_protection_mode(adev); - if (result) - return result; - } else { - result = tonga_smu_start_in_protection_mode(adev); - if (result) - return result; - } - } - - return tonga_smu_request_load_fw(adev); -} - -static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = { - .check_fw_load_finish = tonga_smu_check_fw_load_finish, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -int tonga_smu_init(struct amdgpu_device *adev) -{ - struct tonga_smu_private_data *private; - uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - uint32_t smu_internal_buffer_size = 200*4096; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; - uint64_t mc_addr; - void *toc_buf_ptr; - void *smu_buf_ptr; - int ret; - - private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL); - if (NULL == private) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = private; - adev->smu.fw_flags = 0; - - /* Allocate FW image data structure and header buffer */ - ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, toc_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for TOC buffer\n"); - return -ENOMEM; - } - - /* Allocate buffer for SMU internal buffer */ - ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, smu_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); - return -ENOMEM; - } - - /* Retrieve GPU 
address for header buffer and internal buffer */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the TOC buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.toc_buf); - private->header_addr_low = lower_32_bits(mc_addr); - private->header_addr_high = upper_32_bits(mc_addr); - private->header = toc_buf_ptr; - - ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the SMU internal buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.smu_buf); - private->smu_buffer_addr_low = lower_32_bits(mc_addr); - private->smu_buffer_addr_high = upper_32_bits(mc_addr); - - adev->smu.smumgr_funcs = &tonga_smumgr_funcs; - - return 0; -} - -int tonga_smu_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.fw_buf) - amdgpu_ucode_fini_bo(adev); - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h deleted file mode 100644 index c031ff99fe3e..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef TONGA_SMUMGR_H -#define TONGA_SMUMGR_H - -#include "tonga_ppsmc.h" - -int tonga_smu_init(struct amdgpu_device *adev); -int tonga_smu_fini(struct amdgpu_device *adev); -int tonga_smu_start(struct amdgpu_device *adev); - -struct tonga_smu_private_data -{ - uint8_t *header; - uint32_t smu_buffer_addr_high; - uint32_t smu_buffer_addr_low; - uint32_t header_addr_high; - uint32_t header_addr_low; -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 63ce7f08d1d5..c0d9aad7126f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -79,6 +79,9 @@ #endif #include "dce_virtual.h" +MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); +MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); From ceeb50ed77dc34265c4362855f6a59338b124605 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Mon, 19 Sep 2016 12:13:58 +0800 Subject: [PATCH 41/49] drm/amdgpu:cleanup virt related define MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit move virtual machine related structure to amdgpu_virt.h easy for developer to maintain for virualization stuffs Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 32 +------------ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 57 ++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 31 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9f0049563b71..44255cb0bb50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -57,6 +57,7 @@ #include "amdgpu_acp.h" #include "gpu_scheduler.h" +#include "amdgpu_virt.h" /* * Modules parameters. @@ -1932,37 +1933,6 @@ struct amdgpu_atcs { struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); - -#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */ -#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ -#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ -#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ -/* GPU virtualization */ -struct amdgpu_virtualization { - uint32_t virtual_caps; -}; - -#define amdgpu_sriov_enabled(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) - -#define amdgpu_sriov_vf(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF) - -#define amdgpu_sriov_bios(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) - -#define amdgpu_passthrough(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE) - -static inline bool is_virtual_machine(void) -{ -#ifdef CONFIG_X86 - return boot_cpu_has(X86_FEATURE_HYPERVISOR); -#else - return false; -#endif -} - /* * Core structure, functions and helpers. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h new file mode 100644 index 000000000000..2c37a374917f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -0,0 +1,57 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Monk.liu@amd.com + */ +#ifndef AMDGPU_VIRT_H +#define AMDGPU_VIRT_H + +#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */ +#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ +#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ +#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ +/* GPU virtualization */ +struct amdgpu_virtualization { + uint32_t virtual_caps; +}; + +#define amdgpu_sriov_enabled(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) + +#define amdgpu_sriov_vf(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF) + +#define amdgpu_sriov_bios(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) + +#define amdgpu_passthrough(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE) + +static inline bool is_virtual_machine(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + +#endif \ No newline at end of file From 4f827785404f20ad7ca11c5d2d7832630514a280 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 21 Sep 2016 14:57:06 -0400 Subject: [PATCH 42/49] drm/amdgpu/vce: allow the clock table packet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This packet allows the user mode driver to specify the required performance for specific use cases. 
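For readers following along, the hunk below only extends the packet whitelist that the VCE command-stream parser applies to user submissions; packets it does not recognize are still rejected. A small standalone sketch of that pattern, with the new 0x05000009 case included (the function name, return convention and the test program are illustrative stand-ins, not the kernel code):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative whitelist in the style of the VCE command-stream parser;
 * only the packet IDs are taken from the hunk below. */
static int vce_cmd_allowed(uint32_t cmd)
{
	switch (cmd) {
	case 0x04000008: /* rdo */
	case 0x04000009: /* vui */
	case 0x05000002: /* auxiliary buffer */
	case 0x05000009: /* clock table: UMD states the performance it needs */
		return 0;
	default:
		return -EINVAL; /* unknown packets are rejected, not ignored */
	}
}

int main(void)
{
	printf("clock table packet %s\n",
	       vce_cmd_allowed(0x05000009) == 0 ? "accepted" : "rejected");
	return 0;
}

Accepting 0x05000009 is what lets user space pass its performance requirements through to the kernel instead of relying on the defaults.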
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 2c9ea9b50f48..06b94c13c2c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -691,6 +691,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) case 0x04000008: /* rdo */ case 0x04000009: /* vui */ case 0x05000002: /* auxiliary buffer */ + case 0x05000009: /* clock table */ break; case 0x03000001: /* encode */ From 9cee3c1f95298fb98bbec9e8410d4da64a271fe5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 21 Sep 2016 18:04:50 -0400 Subject: [PATCH 43/49] drm/amdgpu: bump version for new vce packet support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0c5f36d1ea3e..e7ae67234a7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -56,9 +56,10 @@ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS. * - 3.5.0 - Add support for new UVD_NO_OP register. * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. + * - 3.7.0 - Add support for VCE clock list packet */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 6 +#define KMS_DRIVER_MINOR 7 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; From efc83cf423df99bd6c1899cf0b65897f1ee75751 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Sep 2016 14:01:41 -0400 Subject: [PATCH 44/49] drm/amdgpu/atpx: check for ATIF dGPU wake for display events support Some ATPX laptops implement special circuitry to generate display hotplug events via ACPI when the dGPU is powered off. Check if this is supported. 
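The check itself is a single capability bit: the ATPX verify-interface call returns a mask of supported functions, and the new flag is latched from that mask so amdgpu_atpx_dgpu_req_power_for_displays() can answer later without another ACPI round trip. A condensed, compilable sketch of that latch-once pattern (the bit position, type and helper names here are illustrative stand-ins, not the real ATPX definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit position only; the real value comes from the ATPX
 * function-bits definitions and is not reproduced here. */
#define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS_SKETCH (1u << 10)

struct atpx_caps {
	bool dgpu_req_power_for_displays;
};

/* Latch the capability once from the verify-interface mask so later
 * queries (e.g. from the ATIF event handler) need no further ACPI call. */
static void atpx_parse_caps(struct atpx_caps *caps, uint32_t valid_bits)
{
	caps->dgpu_req_power_for_displays =
		!!(valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS_SKETCH);
}

int main(void)
{
	struct atpx_caps caps = { false };

	atpx_parse_caps(&caps, ATPX_DGPU_REQ_POWER_FOR_DISPLAYS_SKETCH);
	printf("dGPU wake for displays: %s\n",
	       caps.dgpu_req_power_for_displays ? "supported" : "not supported");
	return 0;
}

In the hunk below the result is stored in struct amdgpu_atpx next to is_hybrid, mirroring how the other ATPX capabilities are cached.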
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 44255cb0bb50..9d79e4ba0213 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2450,11 +2450,13 @@ void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); bool amdgpu_has_atpx_dgpu_power_cntl(void); bool amdgpu_is_atpx_hybrid(void); +bool amdgpu_atpx_dgpu_req_power_for_displays(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } static inline bool amdgpu_is_atpx_hybrid(void) { return false; } +static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } #endif /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 49de92600074..550c5ee704ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -29,6 +29,7 @@ struct amdgpu_atpx { acpi_handle handle; struct amdgpu_atpx_functions functions; bool is_hybrid; + bool dgpu_req_power_for_displays; }; static struct amdgpu_atpx_priv { @@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) { return amdgpu_atpx_priv.atpx.is_hybrid; } +bool amdgpu_atpx_dgpu_req_power_for_displays(void) { + return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; +} + /** * amdgpu_atpx_call - call an ATPX method * @@ -213,6 +218,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = true; } + atpx->dgpu_req_power_for_displays = false; + if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS) + atpx->dgpu_req_power_for_displays = true; + return 0; } From 1b0f568d24cb7d750e99c48a6dab6e3246507fef Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Sep 2016 14:04:29 -0400 Subject: [PATCH 45/49] drm/amdgpu/atif: Send a hotplug event when we get dgpu display request On PX systems, if the platform supports hotplug events ATIF while the dGPU is powered down, handle the event and alert userspace. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 5cd7b736a9de..5796539a0bcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev, #endif } } + if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { + if ((adev->flags & AMD_IS_PX) && + amdgpu_atpx_dgpu_req_power_for_displays()) { + pm_runtime_get_sync(adev->ddev->dev); + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(adev->ddev); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + } + } /* TODO: check other events */ /* We've handled the event, stop the notifier chain. 
The ACPI interface From 4777166989dee66a5050ce77d0ea37832ed330d7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Sep 2016 14:14:55 -0400 Subject: [PATCH 46/49] drm/radeon/atpx: check for ATIF dGPU wake for display events support Some ATPX laptops implement special circuitry to generate display hotplug events via ACPI when the dGPU is powered off. Check if this is supported. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6de342861202..a1321b2fa454 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -29,6 +29,7 @@ struct radeon_atpx { acpi_handle handle; struct radeon_atpx_functions functions; bool is_hybrid; + bool dgpu_req_power_for_displays; }; static struct radeon_atpx_priv { @@ -72,6 +73,10 @@ bool radeon_is_atpx_hybrid(void) { return radeon_atpx_priv.atpx.is_hybrid; } +bool radeon_atpx_dgpu_req_power_for_displays(void) { + return radeon_atpx_priv.atpx.dgpu_req_power_for_displays; +} + /** * radeon_atpx_call - call an ATPX method * From 735598477f386d05af018e8708aa9e71f1ae163e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 14 Sep 2016 14:15:34 -0400 Subject: [PATCH 47/49] drm/radeon/atif: Send a hotplug event when we get dgpu display request On PX systems, if the platform supports hotplug events ATIF while the dGPU is powered down, handle the event and alert userspace. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_acpi.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 31c9a92d6a1b..6efbd65c929e 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,12 @@ #include "radeon_acpi.h" #include "atom.h" +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_atpx_dgpu_req_power_for_displays(void); +#else +static inline bool radeon_atpx_dgpu_req_power_for_displays(void) { return false; } +#endif + #define ACPI_AC_CLASS "ac_adapter" extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev); @@ -394,6 +401,16 @@ int radeon_atif_handler(struct radeon_device *rdev, #endif } } + if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { + if ((rdev->flags & RADEON_IS_PX) && + radeon_atpx_dgpu_req_power_for_displays()) { + pm_runtime_get_sync(rdev->ddev->dev); + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(rdev->ddev); + pm_runtime_mark_last_busy(rdev->ddev->dev); + pm_runtime_put_autosuspend(rdev->ddev->dev); + } + } /* TODO: check other events */ /* We've handled the event, stop the notifier chain. The ACPI interface From e37e4f057b47ff1e502799062f8ab30156e54bd3 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 22 Sep 2016 10:01:00 -0400 Subject: [PATCH 48/49] drm/amd/amdgpu: Remove division from vblank_wait Removes division from vblank_wait for DCE v6, v8, and v10. Was already implemented for v11. 
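The rewrite is behaviour-preserving: the "is the frame counter still advancing?" check still runs on the very first pass and then once every 100 spins, but the per-iteration "% 100" is replaced by a compare-and-reset counter primed to 100. A small standalone sketch of the new form, showing only one of the two wait loops (the helpers are stand-ins for the per-ASIC register reads, and the fake poll count exists only to make the sketch terminate):

#include <stdbool.h>
#include <stdio.h>

static int polls;

/* Stand-ins for the per-ASIC register reads used by the real driver. */
static bool in_vblank(void)      { return ++polls < 250; } /* fake: leave vblank after ~250 polls */
static bool counter_moving(void) { return true; }

static void vblank_wait_sketch(void)
{
	unsigned int i = 100;	/* primed so the very first pass runs the check */

	while (in_vblank()) {
		if (i++ == 100) {	/* fires every 100 spins, no modulo needed */
			i = 0;
			if (!counter_moving())
				break;	/* counter stalled: display off or hung, give up */
		}
	}
}

int main(void)
{
	vblank_wait_sketch();
	printf("left vblank after %d polls\n", polls);
	return 0;
}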
Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8 +++++--- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 8 +++++--- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8 +++++--- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index db6e6c3eaae2..9a63d7766604 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc) */ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) { - unsigned i = 0; + unsigned i = 100; if (crtc >= adev->mode_info.num_crtc) return; @@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) * wait for another frame. */ while (dce_v10_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v10_0_is_counter_moving(adev, crtc)) break; } } while (!dce_v10_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v10_0_is_counter_moving(adev, crtc)) break; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index c8380f030400..c866185fd091 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -146,7 +146,7 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc) */ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc) { - unsigned i = 0; + unsigned i = 100; if (crtc >= adev->mode_info.num_crtc) return; @@ -158,14 +158,16 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc) * wait for another frame. */ while (dce_v6_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v6_0_is_counter_moving(adev, crtc)) break; } } while (!dce_v6_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v6_0_is_counter_moving(adev, crtc)) break; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 845a30b03cb4..3e1e3d0a850e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc) */ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc) { - unsigned i = 0; + unsigned i = 100; if (crtc >= adev->mode_info.num_crtc) return; @@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc) * wait for another frame. */ while (dce_v8_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v8_0_is_counter_moving(adev, crtc)) break; } } while (!dce_v8_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v8_0_is_counter_moving(adev, crtc)) break; } From beb86f29c9c7f2d04f9a42c4c61cc469c3689779 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 22 Sep 2016 12:20:58 -0400 Subject: [PATCH 49/49] drm/amd/amdgpu: Clean up afmt allocation in DCEv6. 
(v2) v2: minor build fix Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index c866185fd091..eb8f96a61491 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1422,21 +1422,29 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable) enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); } -static void dce_v6_0_afmt_init(struct amdgpu_device *adev) +static int dce_v6_0_afmt_init(struct amdgpu_device *adev) { - int i; + int i, j; for (i = 0; i < adev->mode_info.num_dig; i++) adev->mode_info.afmt[i] = NULL; - /* DCE8 has audio blocks tied to DIG encoders */ + /* DCE6 has audio blocks tied to DIG encoders */ for (i = 0; i < adev->mode_info.num_dig; i++) { adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); if (adev->mode_info.afmt[i]) { adev->mode_info.afmt[i]->offset = dig_offsets[i]; adev->mode_info.afmt[i]->id = i; + } else { + for (j = 0; j < i; j++) { + kfree(adev->mode_info.afmt[j]); + adev->mode_info.afmt[j] = NULL; + } + DRM_ERROR("Out of memory allocating afmt table\n"); + return -ENOMEM; } } + return 0; } static void dce_v6_0_afmt_fini(struct amdgpu_device *adev) @@ -2399,7 +2407,9 @@ static int dce_v6_0_sw_init(void *handle) return -EINVAL; /* setup afmt */ - dce_v6_0_afmt_init(adev); + r = dce_v6_0_afmt_init(adev); + if (r) + return r; r = dce_v6_0_audio_init(adev); if (r)