Merge tag 'drm-fixes-5.3-2019-07-24' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
drm-fixes-5.3-2019-07-24:

amdgpu:
- RAS fixes for vega20
- Navi VCN fix
- DC audio fixes
- DC DSC fixes
- DC dongle fixes
- DC clk mgr fixes
- Fix DDC lines on some RV2 boards
- GDS fixes for compute
- Navi SMU fixes

ttm:
- Use the same attributes when freeing d_page->vaddr

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190724210527.3415-1-alexander.deucher@amd.com
commit 4d5308e785
@@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = {
     .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
 int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xffffffff;
+uint amdgpu_ras_mask = 0xfffffffb;
 
 /**
  * DOC: vramlimit (int)
@@ -1734,7 +1734,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
         return -EINVAL;
 
     if (is_support_sw_smu(adev)) {
-        err = smu_get_current_rpm(&adev->smu, &speed);
+        err = smu_get_fan_speed_rpm(&adev->smu, &speed);
         if (err)
             return err;
     } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -1794,7 +1794,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
         return -EINVAL;
 
     if (is_support_sw_smu(adev)) {
-        err = smu_get_current_rpm(&adev->smu, &rpm);
+        err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
         if (err)
             return err;
     } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
 static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
         struct amdgpu_bo **bo_ptr);
 
-static void amdgpu_ras_self_test(struct amdgpu_device *adev)
-{
-    /* TODO */
-}
-
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                     size_t size, loff_t *pos)
 {
@@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
     if (!obj)
         return -EINVAL;
 
+    if (block_info.block_id != TA_RAS_BLOCK__UMC) {
+        DRM_INFO("%s error injection is not supported yet\n",
+             ras_block_str(info->head.block));
+        return -EINVAL;
+    }
+
     ret = psp_ras_trigger_error(&adev->psp, &block_info);
     if (ret)
         DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
@@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 
     amdgpu_ras_check_supported(adev, &con->hw_supported,
             &con->supported);
+    if (!con->hw_supported) {
+        amdgpu_ras_set_context(adev, NULL);
+        kfree(con);
+        return 0;
+    }
+
     con->features = 0;
     INIT_LIST_HEAD(&con->head);
     /* Might need get this flag from vbios. */
@@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
     if (amdgpu_ras_fs_init(adev))
         goto fs_out;
 
-    amdgpu_ras_self_test(adev);
-
     DRM_INFO("RAS INFO: ras initialized successfully, "
             "hardware ability[%x] ras_mask[%x]\n",
             con->hw_supported, con->supported);
@@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
     }
     nv_grbm_select(adev, 0, 0, 0, 0);
     mutex_unlock(&adev->srbm_mutex);
+
+    /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+       acccess. These should be enabled by FW for target VMIDs. */
+    for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
+    }
 }
 
 static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
@@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
     }
     cik_srbm_select(adev, 0, 0, 0, 0);
     mutex_unlock(&adev->srbm_mutex);
+
+    /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+       acccess. These should be enabled by FW for target VMIDs. */
+    for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+        WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+        WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+        WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+        WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+    }
 }
 
 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
@@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
     }
     vi_srbm_select(adev, 0, 0, 0, 0);
     mutex_unlock(&adev->srbm_mutex);
+
+    /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+       acccess. These should be enabled by FW for target VMIDs. */
+    for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+        WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+        WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+        WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+        WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+    }
 }
 
 static void gfx_v8_0_config_init(struct amdgpu_device *adev)
@@ -1918,6 +1918,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
     }
     soc15_grbm_select(adev, 0, 0, 0, 0);
     mutex_unlock(&adev->srbm_mutex);
+
+    /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+       acccess. These should be enabled by FW for target VMIDs. */
+    for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
+    }
 }
 
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
         WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
             upper_32_bits(adev->vcn.gpu_addr));
         offset = size;
-        /* No signed header for now from firmware
         WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
             AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
-        */
-        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
     }
 
     WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
@@ -262,12 +262,12 @@ void dce110_clk_mgr_construct(
         struct dc_context *ctx,
         struct clk_mgr_internal *clk_mgr)
 {
-    dce_clk_mgr_construct(ctx, clk_mgr);
-
     memcpy(clk_mgr->max_clks_by_state,
         dce110_max_clks_by_state,
         sizeof(dce110_max_clks_by_state));
 
+    dce_clk_mgr_construct(ctx, clk_mgr);
+
     clk_mgr->regs = &disp_clk_regs;
     clk_mgr->clk_mgr_shift = &disp_clk_shift;
     clk_mgr->clk_mgr_mask = &disp_clk_mask;
@@ -226,12 +226,12 @@ void dce112_clk_mgr_construct(
         struct dc_context *ctx,
         struct clk_mgr_internal *clk_mgr)
 {
-    dce_clk_mgr_construct(ctx, clk_mgr);
-
     memcpy(clk_mgr->max_clks_by_state,
         dce112_max_clks_by_state,
         sizeof(dce112_max_clks_by_state));
 
+    dce_clk_mgr_construct(ctx, clk_mgr);
+
     clk_mgr->regs = &disp_clk_regs;
     clk_mgr->clk_mgr_shift = &disp_clk_shift;
     clk_mgr->clk_mgr_mask = &disp_clk_mask;
@@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = {
 
 void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
 {
-    dce_clk_mgr_construct(ctx, clk_mgr);
-
     memcpy(clk_mgr->max_clks_by_state,
         dce120_max_clks_by_state,
         sizeof(dce120_max_clks_by_state));
 
+    dce_clk_mgr_construct(ctx, clk_mgr);
+
     clk_mgr->base.dprefclk_khz = 600000;
     clk_mgr->base.funcs = &dce120_funcs;
 }
@@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
 void dcn2_init_clocks(struct clk_mgr *clk_mgr)
 {
     memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+    // Assumption is that boot state always supports pstate
+    clk_mgr->clks.p_state_change_support = true;
 }
 
 void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct(
     struct dccg *dccg)
 {
     clk_mgr->base.ctx = ctx;
+    clk_mgr->pp_smu = pp_smu;
     clk_mgr->base.funcs = &dcn2_funcs;
     clk_mgr->regs = &clk_mgr_regs;
     clk_mgr->clk_mgr_shift = &clk_mgr_shift;
@@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
 
 static void destruct(struct dc *dc)
 {
-    dc_release_state(dc->current_state);
-    dc->current_state = NULL;
+    if (dc->current_state) {
+        dc_release_state(dc->current_state);
+        dc->current_state = NULL;
+    }
 
     destroy_links(dc);
 
@@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
     uint32_t read_dpcd_retry_cnt = 10;
     enum dc_status status = DC_ERROR_UNEXPECTED;
     int i;
+    union max_down_spread max_down_spread = { {0} };
 
     // Read DPCD 00101h to find out the number of lanes currently set
     for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
         msleep(8);
     }
 
-    ASSERT(status == DC_OK);
-
     // Read DPCD 00100h to find if standard link rates are set
     core_link_read_dpcd(link, DP_LINK_BW_SET,
             &link_bw_set, sizeof(link_bw_set));
@@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
         link->cur_link_settings.link_rate = link_bw_set;
         link->cur_link_settings.use_link_rate_set = false;
     }
+    // Read DPCD 00003h to find the max down spread.
+    core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
+            &max_down_spread.raw, sizeof(max_down_spread));
+    link->cur_link_settings.link_spread =
+        max_down_spread.bits.MAX_DOWN_SPREAD ?
+        LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
 }
 
 static bool detect_dp(
@@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
         return false;
     }
 
-    if (link->connector_signal == SIGNAL_TYPE_EDP) {
-        /* On detect, we want to make sure current link settings are
-         * up to date, especially if link was powered on by GOP.
-         */
-        read_edp_current_link_settings_on_detect(link);
-    }
-
     prev_sink = link->local_sink;
     if (prev_sink != NULL) {
         dc_sink_retain(prev_sink);
@@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
         }
 
         case SIGNAL_TYPE_EDP: {
+            read_edp_current_link_settings_on_detect(link);
             detect_edp_sink_caps(link);
             sink_caps.transaction_type =
                 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
         if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
             if (core_dc->current_state->res_ctx.
                     pipe_ctx[i].stream->link
-                    == link)
+                    == link) {
                 /* DMCU -1 for all controller id values,
                  * therefore +1 here
                  */
@@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
                     core_dc->current_state->
                     res_ctx.pipe_ctx[i].stream_res.tg->inst +
                     1;
+
+                /* Disable brightness ramping when the display is blanked
+                 * as it can hang the DMCU
+                 */
+                if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
+                    frame_ramp = 0;
+            }
         }
     }
     abm->funcs->set_backlight_level_pwm(
@@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
 
     /* Retrain with preferred link settings only relevant for
      * DP signal type
+     * Check for non-DP signal or if passive dongle present
      */
-    if (!dc_is_dp_signal(link->connector_signal))
+    if (!dc_is_dp_signal(link->connector_signal) ||
+        link->dongle_max_pix_clk > 0)
         return;
 
     for (i = 0; i < MAX_PIPES; i++) {
@@ -2230,18 +2230,25 @@ static void get_active_converter_info(
         link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
         ddc_service_set_dongle_type(link->ddc,
                 link->dpcd_caps.dongle_type);
+        link->dpcd_caps.is_branch_dev = false;
         return;
     }
 
     /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
-    link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+    if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
+        link->dpcd_caps.is_branch_dev = false;
+    }
+
+    else {
+        link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+    }
 
     switch (ds_port.fields.PORT_TYPE) {
     case DOWNSTREAM_VGA:
         link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
         break;
-    case DOWNSTREAM_DVI_HDMI:
-        /* At this point we don't know is it DVI or HDMI,
+    case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
+        /* At this point we don't know is it DVI or HDMI or DP++,
          * assume DVI.*/
         link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
         break;
@@ -2258,6 +2265,10 @@ static void get_active_converter_info(
             det_caps, sizeof(det_caps));
 
         switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+        /*Handle DP case as DONGLE_NONE*/
+        case DOWN_STREAM_DETAILED_DP:
+            link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+            break;
         case DOWN_STREAM_DETAILED_VGA:
             link->dpcd_caps.dongle_type =
                 DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2267,6 +2278,8 @@ static void get_active_converter_info(
                 DISPLAY_DONGLE_DP_DVI_CONVERTER;
             break;
         case DOWN_STREAM_DETAILED_HDMI:
+        case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
+            /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/
             link->dpcd_caps.dongle_type =
                 DISPLAY_DONGLE_DP_HDMI_CONVERTER;
 
@@ -2282,14 +2295,18 @@ static void get_active_converter_info(
 
             link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
                 hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
-            link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
-                hdmi_caps.bits.YCrCr422_PASS_THROUGH;
-            link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
-                hdmi_caps.bits.YCrCr420_PASS_THROUGH;
-            link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
-                hdmi_caps.bits.YCrCr422_CONVERSION;
-            link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
-                hdmi_caps.bits.YCrCr420_CONVERSION;
+            /*YCBCR capability only for HDMI case*/
+            if (port_caps->bits.DWN_STRM_PORTX_TYPE
+                    == DOWN_STREAM_DETAILED_HDMI) {
+                link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+                    hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+                link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+                    hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+                link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+                    hdmi_caps.bits.YCrCr422_CONVERSION;
+                link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+                    hdmi_caps.bits.YCrCr420_CONVERSION;
+            }
 
             link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
                 translate_dpcd_max_bpc(
@@ -258,7 +258,7 @@ bool resource_construct(
      * PORT_CONNECTIVITY == 1 (as instructed by HW team).
      */
     update_num_audio(&straps, &num_audio, &pool->audio_support);
-    for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+    for (i = 0; i < caps->num_audio; i++) {
         struct audio *aud = create_funcs->create_audio(ctx, i);
 
         if (aud == NULL) {
@@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio(
             return pool->audios[i];
         }
     }
+
+    /* use engine id to find free audio */
+    if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+        return pool->audios[id];
+    }
+
     /*not found the matching one, first come first serve*/
     for (i = 0; i < pool->audio_count; i++) {
         if (res_ctx->is_audio_acquired[i] == false) {
@@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
         pix_clk /= 2;
+    if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
     switch (timing->display_color_depth) {
     case COLOR_DEPTH_666:
     case COLOR_DEPTH_888:
         normalized_pix_clk = pix_clk;
         break;
@@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources(
     /* TODO: Add check if ASIC support and EDID audio */
     if (!stream->converter_disable_audio &&
         dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
-        stream->audio_info.mode_count) {
+        stream->audio_info.mode_count && stream->audio_info.flags.all) {
         pipe_ctx->stream_res.audio = find_first_free_audio(
         &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
@@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
 
     pipe_ctx->stream->dmdata_address = attr->address;
 
-    if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
+    if (pipe_ctx->stream_res.stream_enc &&
+            pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
         if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
             /* if using dynamic meta, don't set up generic infopackets */
             pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
@@ -239,6 +239,10 @@ static void dmcu_set_backlight_level(
     s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
     REG_WRITE(BIOS_SCRATCH_2, s2);
+
+    /* waitDMCUReadyForCmd */
+    REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+            0, 1, 80000);
 }
 
 static void dce_abm_init(struct abm *abm)
@@ -965,11 +965,17 @@ void hwss_edp_backlight_control(
 void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 {
     /* notify audio driver for audio modes of monitor */
-    struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+    struct dc *core_dc;
     struct pp_smu_funcs *pp_smu = NULL;
-    struct clk_mgr *clk_mgr = core_dc->clk_mgr;
+    struct clk_mgr *clk_mgr;
     unsigned int i, num_audio = 1;
 
+    if (!pipe_ctx->stream)
+        return;
+
+    core_dc = pipe_ctx->stream->ctx->dc;
+    clk_mgr = core_dc->clk_mgr;
+
     if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
         return;
 
@@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 
 void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
 {
-    struct dc *dc = pipe_ctx->stream->ctx->dc;
+    struct dc *dc;
     struct pp_smu_funcs *pp_smu = NULL;
-    struct clk_mgr *clk_mgr = dc->clk_mgr;
+    struct clk_mgr *clk_mgr;
+
+    if (!pipe_ctx || !pipe_ctx->stream)
+        return;
+
+    dc = pipe_ctx->stream->ctx->dc;
+    clk_mgr = dc->clk_mgr;
 
     if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
         return;
@@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
     pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
             pipe_ctx->stream_res.stream_enc, true);
+    if (pipe_ctx->stream_res.audio) {
+        pipe_ctx->stream_res.audio->enabled = false;
 
     if (dc->res_pool->pp_smu)
         pp_smu = dc->res_pool->pp_smu;
 
@@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
         /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
          * stream->stream_engine_id);
          */
-        if (pipe_ctx->stream_res.audio)
-            pipe_ctx->stream_res.audio->enabled = false;
     }
 }
 
@@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc)
      * everything down.
      */
     if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
-        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-            struct hubp *hubp = dc->res_pool->hubps[i];
-            struct dpp *dpp = dc->res_pool->dpps[i];
-
-            hubp->funcs->hubp_init(hubp);
-            dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
-            plane_atomic_power_down(dc, dpp, hubp);
-        }
-
-        apply_DEGVIDCN10_253_wa(dc);
+        dc->hwss.init_pipes(dc, dc->current_state);
     }
 
     for (i = 0; i < dc->res_pool->audio_count; i++) {
@@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
     return result;
 }
 
-
-
-
-
 static bool
 dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
                    const struct dc_stream_state *stream)
@@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface(
         if (removed_pipe[i])
             dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
 
+    for (i = 0; i < dc->res_pool->pipe_count; i++)
+        if (removed_pipe[i]) {
+            dc->hwss.optimize_bandwidth(dc, context);
+            break;
+        }
+
     if (dc->hwseq->wa.DEGVIDCN10_254)
         hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
 }
@@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = {
         .num_audio = 3,
         .num_stream_encoder = 3,
         .num_pll = 3,
-        .num_ddc = 3,
+        .num_ddc = 4,
 };
 
 static const struct dc_plane_cap plane_cap = {
@@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
         break;
     default:
         ASSERT(false);
+        block_size = page_table_block_size;
         break;
     }
 
@@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
     struct dcn_vmid_page_table_config phys_config;
 
     REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
-            FB_BASE, pa_config->system_aperture.fb_base);
+            FB_BASE, pa_config->system_aperture.fb_base >> 24);
     REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
-            FB_TOP, pa_config->system_aperture.fb_top);
+            FB_TOP, pa_config->system_aperture.fb_top >> 24);
     REG_SET(DCN_VM_FB_OFFSET, 0,
-            FB_OFFSET, pa_config->system_aperture.fb_offset);
+            FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
     REG_SET(DCN_VM_AGP_BOT, 0,
-            AGP_BOT, pa_config->system_aperture.agp_bot);
+            AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
     REG_SET(DCN_VM_AGP_TOP, 0,
-            AGP_TOP, pa_config->system_aperture.agp_top);
+            AGP_TOP, pa_config->system_aperture.agp_top >> 24);
     REG_SET(DCN_VM_AGP_BASE, 0,
-            AGP_BASE, pa_config->system_aperture.agp_base);
+            AGP_BASE, pa_config->system_aperture.agp_base >> 24);
 
     if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
-        phys_config.depth = 1;
-        phys_config.block_size = 4096;
         phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
         phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
         phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
-
+        phys_config.depth = 0;
+        phys_config.block_size = 0;
         // Init VMID 0 based on PA config
         dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
     }
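The `>> 24` conversions in the hunk above suggest the DCN_VM_FB_LOCATION/FB_OFFSET/AGP registers take addresses in 16 MiB (2^24-byte) units, i.e. bits [47:24] of the byte address; for example, an aperture base of 0x0000008000000000 would then be programmed as 0x8000. That granularity is inferred from the shift itself rather than stated anywhere in this diff.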
@@ -1153,8 +1153,8 @@ void dcn20_enable_plane(
 
     apt.sys_default.quad_part = 0;
 
-    apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr;
-    apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr;
+    apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
+    apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
 
     // Program system aperture settings
     pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
@@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global(
                 CRTC_STATE_VACTIVE);
         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
                 CRTC_STATE_VBLANK);
+        pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
+                CRTC_STATE_VACTIVE);
         pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
                 pipe->stream_res.tg);
     }
@@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock(
     if (pipe->plane_state != NULL)
         flip_immediate = pipe->plane_state->flip_immediate;
 
+    if (flip_immediate && lock) {
+        while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) {
+            udelay(1);
+        }
+
+        if (pipe->bottom_pipe != NULL)
+            while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) {
+                udelay(1);
+            }
+    }
+
     /* In flip immediate and pipe splitting case, we need to use GSL
      * for synchronization. Only do setup on locking and on flip type change.
      */
@@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe(
         else if (pipe_ctx->stream_res.audio) {
             dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
         }
-
     }
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+    else if (pipe_ctx->stream_res.dsc)
+        dp_set_dsc_enable(pipe_ctx, false);
+#endif
 
     /* by upper caller loop, parent pipe: pipe0, will be reset last.
      * back end share by all pipes and will be disable only when disable
|
|||
optc1->min_h_blank = 32;
|
||||
optc1->min_v_blank = 3;
|
||||
optc1->min_v_blank_interlace = 5;
|
||||
optc1->min_h_sync_width = 8;
|
||||
optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue.
|
||||
optc1->min_v_sync_width = 1;
|
||||
optc1->comb_opp_id = 0xf;
|
||||
}
|
||||
|
|
|
@@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
 
     if (dc->bb_overrides.min_dcfclk_mhz > 0)
         min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
+    else
+        // Accounting for SOC/DCF relationship, we can go as high as
+        // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506.
+        min_dcfclk = 507;
 
     for (i = 0; i < num_states; i++) {
         int min_fclk_required_by_uclk;
@@ -23,6 +23,8 @@
  *
  */
 
+#include <linux/delay.h>
+
 #include "dcn20_vmid.h"
 #include "reg_helper.h"
 
@@ -36,6 +38,38 @@
 #define FN(reg_name, field_name) \
     vmid->shifts->field_name, vmid->masks->field_name
 
+static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
+{
+    /* According the hardware spec, we need to poll for the lowest
+     * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM
+     * context is updated. We can't use REG_WAIT here since we
+     * don't have a seperate field to wait on.
+     *
+     * TODO: Confirm timeout / poll interval with hardware team
+     */
+
+    int max_times = 10000;
+    int delay_us = 5;
+    int i;
+
+    for (i = 0; i < max_times; ++i) {
+        uint32_t entry_lo32;
+
+        REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
+            VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
+            &entry_lo32);
+
+        if (entry_lo32 & 0x1)
+            return;
+
+        udelay(delay_us);
+    }
+
+    /* VM setup timed out */
+    DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
+    ASSERT(0);
+}
+
 void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
 {
     REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
@@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_
 
     REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
             VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
+    /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */
     REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
             VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
+
+    dcn20_wait_for_vmid_ready(vmid);
 }
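For reference, the polling loop added above allows at most max_times * delay_us = 10000 * 5 us = 50 ms of waiting before it logs the timeout warning and asserts.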
@@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
     vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
     vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
 
+    /* As per DSC spec v1.2a recommendation: */
+    if (vdsc_cfg->native_420)
+        vdsc_cfg->second_line_offset_adj = 512;
+    else
+        vdsc_cfg->second_line_offset_adj = 0;
+
     return 0;
 }
 EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
@@ -212,7 +212,7 @@ struct resource_pool {
     struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
     unsigned int clk_src_count;
 
-    struct audio *audios[MAX_PIPES];
+    struct audio *audios[MAX_AUDIOS];
     unsigned int audio_count;
     struct audio_support audio_support;
 
@@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth {
 };
 
 enum dcn_hubbub_page_table_block_size {
-    DCN_PAGE_TABLE_BLOCK_SIZE_4KB,
-    DCN_PAGE_TABLE_BLOCK_SIZE_64KB
+    DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0,
+    DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4,
 };
 
 struct dcn_hubbub_phys_addr_config {
@@ -34,6 +34,7 @@
  * Data types shared between different Virtual HW blocks
  ******************************************************************************/
 
+#define MAX_AUDIOS 7
 #define MAX_PIPES 6
 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define MAX_DWB_PIPES 1
@@ -43,7 +43,7 @@ enum dpcd_revision {
 enum dpcd_downstream_port_type {
     DOWNSTREAM_DP = 0,
     DOWNSTREAM_VGA,
-    DOWNSTREAM_DVI_HDMI,
+    DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */
     DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
 };
 
@@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 {
     int ret = 0, clk_id = 0;
     uint32_t param = 0;
+    uint32_t clock_limit;
 
     if (!min && !max)
         return -EINVAL;
 
-    if (!smu_clk_dpm_is_enabled(smu, clk_type))
+    if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+        switch (clk_type) {
+        case SMU_MCLK:
+        case SMU_UCLK:
+            clock_limit = smu->smu_table.boot_values.uclk;
+            break;
+        case SMU_GFXCLK:
+        case SMU_SCLK:
+            clock_limit = smu->smu_table.boot_values.gfxclk;
+            break;
+        case SMU_SOCCLK:
+            clock_limit = smu->smu_table.boot_values.socclk;
+            break;
+        default:
+            clock_limit = 0;
+            break;
+        }
+
+        /* clock in Mhz unit */
+        if (min)
+            *min = clock_limit / 100;
+        if (max)
+            *max = clock_limit / 100;
+
         return 0;
+    }
 
     mutex_lock(&smu->mutex);
     clk_id = smu_clk_get_index(smu, clk_type);
@@ -1349,13 +1374,49 @@ static int smu_enable_umd_pstate(void *handle,
     return 0;
 }
 
+static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+    int ret = 0;
+    uint32_t sclk_mask, mclk_mask, soc_mask;
+
+    switch (level) {
+    case AMD_DPM_FORCED_LEVEL_HIGH:
+        ret = smu_force_dpm_limit_value(smu, true);
+        break;
+    case AMD_DPM_FORCED_LEVEL_LOW:
+        ret = smu_force_dpm_limit_value(smu, false);
+        break;
+    case AMD_DPM_FORCED_LEVEL_AUTO:
+    case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+        ret = smu_unforce_dpm_levels(smu);
+        break;
+    case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+    case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+    case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+        ret = smu_get_profiling_clk_mask(smu, level,
+                         &sclk_mask,
+                         &mclk_mask,
+                         &soc_mask);
+        if (ret)
+            return ret;
+        smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+        smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+        smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+        break;
+    case AMD_DPM_FORCED_LEVEL_MANUAL:
+    case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+    default:
+        break;
+    }
+    return ret;
+}
+
 int smu_adjust_power_state_dynamic(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    bool skip_display_settings)
 {
     int ret = 0;
     int index = 0;
-    uint32_t sclk_mask, mclk_mask, soc_mask;
     long workload;
     struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
@@ -1386,39 +1447,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
     }
 
     if (smu_dpm_ctx->dpm_level != level) {
-        switch (level) {
-        case AMD_DPM_FORCED_LEVEL_HIGH:
-            ret = smu_force_dpm_limit_value(smu, true);
-            break;
-        case AMD_DPM_FORCED_LEVEL_LOW:
-            ret = smu_force_dpm_limit_value(smu, false);
-            break;
-
-        case AMD_DPM_FORCED_LEVEL_AUTO:
-        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-            ret = smu_unforce_dpm_levels(smu);
-            break;
-
-        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
-        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
-        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-            ret = smu_get_profiling_clk_mask(smu, level,
-                             &sclk_mask,
-                             &mclk_mask,
-                             &soc_mask);
-            if (ret)
-                return ret;
-            smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
-            smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
-            smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
-            break;
-
-        case AMD_DPM_FORCED_LEVEL_MANUAL:
-        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
-        default:
-            break;
+        ret = smu_asic_set_performance_level(smu, level);
+        if (ret) {
+            ret = smu_default_set_performance_level(smu, level);
         }
-
         if (!ret)
             smu_dpm_ctx->dpm_level = level;
     }
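The /100 in the new fallback path above implies the boot_values clocks are stored in 10 kHz units, so e.g. a boot uclk value of 87500 is reported back as 875 MHz; that unit is inferred from the "clock in Mhz unit" comment and the divisor, not stated elsewhere in the diff.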
@@ -613,6 +613,7 @@ struct pptable_funcs {
     int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
     int (*set_thermal_fan_table)(struct smu_context *smu);
     int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+    int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
     int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
                     struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
     int (*get_current_clk_freq_by_table)(struct smu_context *smu,
@@ -621,6 +622,7 @@ struct pptable_funcs {
     int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
     int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
     int (*set_default_od_settings)(struct smu_context *smu, bool initialize);
+    int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
 };
 
 struct smu_funcs
@@ -685,7 +687,6 @@ struct smu_funcs
     int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
             struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
     int (*conv_power_profile_to_pplib_workload)(int power_profile);
-    int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
     uint32_t (*get_fan_control_mode)(struct smu_context *smu);
     int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
     int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
@@ -751,8 +752,6 @@ struct smu_funcs
     ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
 #define smu_set_default_od_settings(smu, initialize) \
     ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_get_current_rpm(smu, speed) \
-    ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
 #define smu_set_fan_speed_rpm(smu, speed) \
     ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
 #define smu_send_smc_msg(smu, msg) \
@@ -841,6 +840,8 @@ struct smu_funcs
     ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
 #define smu_set_fan_speed_percent(smu, speed) \
     ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+#define smu_get_fan_speed_rpm(smu, speed) \
+    ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
 
 #define smu_msg_get_index(smu, msg) \
     ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
@@ -918,6 +919,9 @@ struct smu_funcs
     ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
 #define smu_baco_reset(smu) \
     ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
+#define smu_asic_set_performance_level(smu, level) \
+    ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
+
 
 extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                    uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -626,11 +626,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
     return ret;
 }
 
+static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+    PPTable_t *pptable = smu->smu_table.driver_pptable;
+    DpmDescriptor_t *dpm_desc = NULL;
+    uint32_t clk_index = 0;
+
+    clk_index = smu_clk_get_index(smu, clk_type);
+    dpm_desc = &pptable->DpmDescriptor[clk_index];
+
+    /* 0 - Fine grained DPM, 1 - Discrete DPM */
+    return dpm_desc->SnapToDiscrete == 0 ? true : false;
+}
+
 static int navi10_print_clk_levels(struct smu_context *smu,
             enum smu_clk_type clk_type, char *buf)
 {
     int i, size = 0, ret = 0;
     uint32_t cur_value = 0, value = 0, count = 0;
+    uint32_t freq_values[3] = {0};
+    uint32_t mark_index = 0;
 
     switch (clk_type) {
     case SMU_GFXCLK:
@@ -643,22 +658,42 @@ static int navi10_print_clk_levels(struct smu_context *smu,
         ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
         if (ret)
             return size;
 
         /* 10KHz -> MHz */
         cur_value = cur_value / 100;
 
-        size += sprintf(buf, "current clk: %uMhz\n", cur_value);
-
         ret = smu_get_dpm_level_count(smu, clk_type, &count);
         if (ret)
             return size;
 
-        for (i = 0; i < count; i++) {
-            ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+        if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+            for (i = 0; i < count; i++) {
+                ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
                 if (ret)
                     return size;
 
+                size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                        cur_value == value ? "*" : "");
+            }
+        } else {
+            ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
+            if (ret)
+                return size;
+            ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
+            if (ret)
+                return size;
 
-            size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
-                    cur_value == value ? "*" : "");
+            freq_values[1] = cur_value;
+            mark_index = cur_value == freq_values[0] ? 0 :
+                     cur_value == freq_values[2] ? 2 : 1;
+            if (mark_index != 1)
+                freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
+
+            for (i = 0; i < 3; i++) {
+                size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+                        i == mark_index ? "*" : "");
+            }
+
         }
         break;
     default:
@@ -919,12 +954,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
     return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
-static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
+static int navi10_get_fan_speed_rpm(struct smu_context *smu,
+                    uint32_t *speed)
 {
     SmuMetrics_t metrics;
     int ret = 0;
 
-    if (!value)
+    if (!speed)
         return -EINVAL;
 
     memset(&metrics, 0, sizeof(metrics));
@@ -934,7 +970,7 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
     if (ret)
         return ret;
 
-    *value = metrics.CurrFanSpeed;
+    *speed = metrics.CurrFanSpeed;
 
     return ret;
 }
@@ -944,10 +980,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu,
 {
     int ret = 0;
     uint32_t percent = 0;
-    uint16_t current_rpm;
+    uint32_t current_rpm;
     PPTable_t *pptable = smu->smu_table.driver_pptable;
 
-    ret = navi10_get_fan_speed(smu, &current_rpm);
+    ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
     if (ret)
         return ret;
 
@@ -1530,6 +1566,60 @@ static int navi10_set_ppfeature_status(struct smu_context *smu,
     return 0;
 }
 
+static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+{
+    struct amdgpu_device *adev = smu->adev;
+    int ret = 0;
+    uint32_t sclk_freq = 0, uclk_freq = 0;
+    uint32_t uclk_level = 0;
+
+    switch (adev->rev_id) {
+    case 0xf0: /* XTX */
+    case 0xc0:
+        sclk_freq = NAVI10_PEAK_SCLK_XTX;
+        break;
+    case 0xf1: /* XT */
+    case 0xc1:
+        sclk_freq = NAVI10_PEAK_SCLK_XT;
+        break;
+    default: /* XL */
+        sclk_freq = NAVI10_PEAK_SCLK_XL;
+        break;
+    }
+
+    ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
+    if (ret)
+        return ret;
+    ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+    if (ret)
+        return ret;
+
+    ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+    if (ret)
+        return ret;
+    ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+    if (ret)
+        return ret;
+
+    return ret;
+}
+
+static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+    int ret = 0;
+
+    switch (level) {
+    case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+        ret = navi10_set_peak_clock_by_device(smu);
+        break;
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
     .tables_init = navi10_tables_init,
     .alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1557,6 +1647,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
     .unforce_dpm_levels = navi10_unforce_dpm_levels,
     .is_dpm_running = navi10_is_dpm_running,
     .get_fan_speed_percent = navi10_get_fan_speed_percent,
+    .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
     .get_power_profile_mode = navi10_get_power_profile_mode,
     .set_power_profile_mode = navi10_set_power_profile_mode,
     .get_profiling_clk_mask = navi10_get_profiling_clk_mask,
@@ -1565,6 +1656,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
     .get_uclk_dpm_states = navi10_get_uclk_dpm_states,
     .get_ppfeature_status = navi10_get_ppfeature_status,
     .set_ppfeature_status = navi10_set_ppfeature_status,
+    .set_performance_level = navi10_set_performance_level,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
@@ -23,6 +23,10 @@
 #ifndef __NAVI10_PPT_H__
 #define __NAVI10_PPT_H__
 
+#define NAVI10_PEAK_SCLK_XTX (1830)
+#define NAVI10_PEAK_SCLK_XT  (1755)
+#define NAVI10_PEAK_SCLK_XL  (1625)
+
 extern void navi10_set_ppt_funcs(struct smu_context *smu);
 
 #endif
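The NAVI10_PEAK_SCLK_* values above appear to be MHz targets for the XTX/XT/XL SKUs that navi10_set_peak_clock_by_device() pins by passing the same value as both the minimum and maximum to smu_set_soft_freq_range(); the MHz interpretation is an assumption based on how smu_set_soft_freq_range() is used in that function, not something stated in the headers.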
@@ -1371,23 +1371,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
     return ret;
 }
 
-static int smu_v11_0_get_current_rpm(struct smu_context *smu,
-                     uint32_t *current_rpm)
-{
-    int ret;
-
-    ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
-
-    if (ret) {
-        pr_err("Attempt to get current RPM from SMC Failed!\n");
-        return ret;
-    }
-
-    smu_read_smc_arg(smu, current_rpm);
-
-    return 0;
-}
-
 static uint32_t
 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
 {
@@ -1773,7 +1756,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
     .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
     .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
     .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
-    .get_current_rpm = smu_v11_0_get_current_rpm,
     .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
     .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
     .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
@@ -3015,6 +3015,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
     return ret;
 }
 
+static int vega20_get_fan_speed_rpm(struct smu_context *smu,
+                    uint32_t *speed)
+{
+    int ret;
+
+    ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+
+    if (ret) {
+        pr_err("Attempt to get current RPM from SMC Failed!\n");
+        return ret;
+    }
+
+    smu_read_smc_arg(smu, speed);
+
+    return 0;
+}
+
 static int vega20_get_fan_speed_percent(struct smu_context *smu,
                     uint32_t *speed)
 {
@@ -3022,7 +3039,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
     uint32_t current_rpm = 0, percent = 0;
     PPTable_t *pptable = smu->smu_table.driver_pptable;
 
-    ret = smu_get_current_rpm(smu, &current_rpm);
+    ret = vega20_get_fan_speed_rpm(smu, &current_rpm);
     if (ret)
         return ret;
 
@@ -3293,6 +3310,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
     .is_dpm_running = vega20_is_dpm_running,
    .set_thermal_fan_table = vega20_set_thermal_fan_table,
     .get_fan_speed_percent = vega20_get_fan_speed_percent,
+    .get_fan_speed_rpm = vega20_get_fan_speed_rpm,
     .set_watermarks_table = vega20_set_watermarks_table,
     .get_thermal_temperature_range = vega20_get_thermal_temperature_range
 };
@@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
 
 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
 {
+    unsigned long attrs = 0;
     dma_addr_t dma = d_page->dma;
     d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
-    dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
+    if (pool->type & IS_HUGE)
+        attrs = DMA_ATTR_NO_WARN;
+
+    dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
 
     kfree(d_page);
     d_page = NULL;
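As a minimal sketch of the allocate/free attribute symmetry the commit message refers to ("Use the same attributes when freeing d_page->vaddr"), using only the generic DMA-mapping API; the helper names below are illustrative and not taken from the TTM code:

#include <linux/dma-mapping.h>

/* Illustrative only: whatever attrs were used for dma_alloc_attrs()
 * (e.g. DMA_ATTR_NO_WARN for huge pools) must be passed again to
 * dma_free_attrs(), which is what the hunk above restores.
 */
static void *example_alloc(struct device *dev, size_t size,
               dma_addr_t *dma, unsigned long attrs)
{
    return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, attrs);
}

static void example_free(struct device *dev, size_t size, void *vaddr,
             dma_addr_t dma, unsigned long attrs)
{
    dma_free_attrs(dev, size, vaddr, dma, attrs);
}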