drm/amd/powerplay: put dpm frequency setting common code in smu_v11_0.c

As designed, the common code shared among all smu v11 ASICs goes into
smu_v11_0.c. This helps to maintain a clear code layering.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Evan Quan authored on 2020-06-09 17:26:39 +08:00; committed by Alex Deucher
parent e5ef784b1e
commit d8d3493a46
6 changed files with 129 additions and 96 deletions
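
The layering described above can be sketched briefly (illustrative only; the example_asic_* name is hypothetical and not part of this commit): an ASIC-specific ppt file calls the shared smu_v11_0_* helpers directly, so the per-ASIC code carries only ASIC-specific policy.

static int example_asic_get_uclk_range(struct smu_context *smu,
				       uint32_t *min_freq,
				       uint32_t *max_freq)
{
	/* Common smu v11 helper from smu_v11_0.c; the same call works
	 * for any smu v11 ASIC (navi10, sienna cichlid, ...). */
	return smu_v11_0_get_dpm_level_range(smu, SMU_UCLK, min_freq, max_freq);
}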

View File

@@ -284,71 +284,6 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}
-int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
-			      uint16_t level, uint32_t *value)
-{
-	int ret = 0, clk_id = 0;
-	uint32_t param;
-
-	if (!value)
-		return -EINVAL;
-
-	if (!smu_clk_dpm_is_enabled(smu, clk_type))
-		return 0;
-
-	clk_id = smu_clk_get_index(smu, clk_type);
-	if (clk_id < 0)
-		return clk_id;
-
-	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
-
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
-					  param, value);
-	if (ret)
-		return ret;
-
-	/* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
-	 * now, we do not support it */
-	*value = *value & 0x7fffffff;
-
-	return ret;
-}
-
-int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t *value)
-{
-	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
-}
-
-int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t *min_value, uint32_t *max_value)
-{
-	int ret = 0;
-	uint32_t level_count = 0;
-
-	if (!min_value && !max_value)
-		return -EINVAL;
-
-	if (min_value) {
-		/* by default, use the level 0 clock value as the min value */
-		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
-		if (ret)
-			return ret;
-	}
-
-	if (max_value) {
-		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
-		if (ret)
-			return ret;
-
-		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
enum smu_feature_mask feature_id = 0;
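
A note on the message interface used above: SMU_MSG_GetDpmFreqByIndex packs the clock ID into the upper 16 bits of its argument and the DPM level index into the lower 16 bits, and passing level 0xff asks the firmware for the number of levels rather than a frequency, which is how smu_get_dpm_level_count() works. A minimal sketch of that packing (the helper name is hypothetical, not from this patch):

/* Pack a GetDpmFreqByIndex argument: clk_id in bits 31:16, DPM level
 * index in bits 15:0. Level 0xff requests the DPM level count. */
static inline uint32_t dpm_freq_by_index_param(int clk_id, uint16_t level)
{
	return (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
}

The reply carries the frequency in bits 30:0; bit 31 distinguishes fine grained from discrete DPM and is masked off since discrete DPM is not supported.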

View File

@@ -715,16 +715,10 @@ int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type,
bool en);
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
-int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
-			      uint16_t level, uint32_t *value);
-int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
-int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t *min_value, uint32_t *max_value);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);

View File

@@ -265,4 +265,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
int smu_v11_0_set_power_source(struct smu_context *smu,
enum smu_power_src_type power_src);
+int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
+				    enum smu_clk_type clk_type,
+				    uint16_t level,
+				    uint32_t *value);
+
+int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
+				  enum smu_clk_type clk_type,
+				  uint32_t *value);
+
+int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
+				  enum smu_clk_type clk_type,
+				  uint32_t *min_value,
+				  uint32_t *max_value);
#endif

View File

@@ -881,13 +881,13 @@ static int navi10_print_clk_levels(struct smu_context *smu,
if (ret)
return size;
-	ret = smu_get_dpm_level_count(smu, clk_type, &count);
+	ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
return size;
if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
for (i = 0; i < count; i++) {
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
return size;
@@ -895,10 +895,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
cur_value == value ? "*" : "");
}
} else {
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
return size;
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
return size;
@@ -1058,11 +1058,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
}
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
return size;
@@ -1110,7 +1110,7 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
-	ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
+	ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &level_count);
if (ret)
return ret;
@@ -1118,7 +1118,7 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
clocks->num_levels = level_count;
for (i = 0; i < level_count; i++) {
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &freq);
if (ret)
return ret;
@@ -1493,21 +1493,21 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
if(sclk_mask) {
-		ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count);
+		ret = smu_v11_0_get_dpm_level_count(smu, SMU_SCLK, &level_count);
if (ret)
return ret;
*sclk_mask = level_count - 1;
}
if(mclk_mask) {
-		ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
+		ret = smu_v11_0_get_dpm_level_count(smu, SMU_MCLK, &level_count);
if (ret)
return ret;
*mclk_mask = level_count - 1;
}
if(soc_mask) {
-		ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
+		ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
if (ret)
return ret;
*soc_mask = level_count - 1;
@@ -1831,12 +1831,18 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
break;
default:
-		ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
+		ret = smu_v11_0_get_dpm_level_range(smu,
+						    SMU_SCLK,
+						    NULL,
+						    &sclk_freq);
if (ret)
return ret;
}
-	ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
+	ret = smu_v11_0_get_dpm_level_range(smu,
+					    SMU_UCLK,
+					    NULL,
+					    &uclk_freq);
if (ret)
return ret;
@@ -2331,15 +2337,15 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
if (smu_version < 0x2A3200)
return 0;
-	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
if (ret)
return ret;
-	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
if (ret)
return ret;
-	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
if (ret)
return ret;

View File

@@ -789,13 +789,13 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
amdgpu_gfx_off_ctrl(adev, false);
-	ret = smu_get_dpm_level_count(smu, clk_type, &count);
+	ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
goto print_clk_out;
if (!sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
for (i = 0; i < count; i++) {
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
goto print_clk_out;
@@ -803,10 +803,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
cur_value == value ? "*" : "");
}
} else {
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
goto print_clk_out;
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
goto print_clk_out;
@@ -901,11 +901,11 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
}
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
goto forec_level_out;
-	ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
goto forec_level_out;
@@ -1312,7 +1312,7 @@ static int sienna_cichlid_get_profiling_clk_mask(struct smu_context *smu,
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
if(sclk_mask) {
amdgpu_gfx_off_ctrl(adev, false);
-		ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count);
+		ret = smu_v11_0_get_dpm_level_count(smu, SMU_SCLK, &level_count);
amdgpu_gfx_off_ctrl(adev, true);
if (ret)
return ret;
@@ -1320,14 +1320,14 @@ static int sienna_cichlid_get_profiling_clk_mask(struct smu_context *smu,
}
if(mclk_mask) {
-		ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
+		ret = smu_v11_0_get_dpm_level_count(smu, SMU_MCLK, &level_count);
if (ret)
return ret;
*mclk_mask = level_count - 1;
}
if(soc_mask) {
-		ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
+		ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
if (ret)
return ret;
*soc_mask = level_count - 1;

View File

@@ -1911,3 +1911,87 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
NULL);
}
+int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
+				    enum smu_clk_type clk_type,
+				    uint16_t level,
+				    uint32_t *value)
+{
+	int ret = 0, clk_id = 0;
+	uint32_t param;
+
+	if (!value)
+		return -EINVAL;
+
+	if (!smu_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	clk_id = smu_clk_get_index(smu, clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+	ret = smu_send_smc_msg_with_param(smu,
+					  SMU_MSG_GetDpmFreqByIndex,
+					  param,
+					  value);
+	if (ret)
+		return ret;
+
+	/*
+	 * BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
+	 * now, we do not support it
+	 */
+	*value = *value & 0x7fffffff;
+
+	return ret;
+}
+
+int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
+				  enum smu_clk_type clk_type,
+				  uint32_t *value)
+{
+	return smu_v11_0_get_dpm_freq_by_index(smu,
+					       clk_type,
+					       0xff,
+					       value);
+}
+
+int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
+				  enum smu_clk_type clk_type,
+				  uint32_t *min_value,
+				  uint32_t *max_value)
+{
+	uint32_t level_count = 0;
+	int ret = 0;
+
+	if (!min_value && !max_value)
+		return -EINVAL;
+
+	if (min_value) {
+		/* by default, use the level 0 clock value as the min value */
+		ret = smu_v11_0_get_dpm_freq_by_index(smu,
+						      clk_type,
+						      0,
+						      min_value);
+		if (ret)
+			return ret;
+	}
+
+	if (max_value) {
+		ret = smu_v11_0_get_dpm_level_count(smu,
+						    clk_type,
+						    &level_count);
+		if (ret)
+			return ret;
+
+		ret = smu_v11_0_get_dpm_freq_by_index(smu,
+						      clk_type,
+						      level_count - 1,
+						      max_value);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
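
As the navi10 hunks above show, min_value and max_value are independently optional: a caller that only needs the highest level can pass NULL for the minimum. A short usage sketch mirroring navi10_set_peak_performance_level:

	uint32_t uclk_freq;
	int ret;

	/* NULL min pointer: fetch only the highest UCLK DPM level. */
	ret = smu_v11_0_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
	if (ret)
		return ret;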