Merge branch 'drm-fixes-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
Two sets of amdgpu fixes as I missed one set.

* 'drm-fixes-4.9' of git://people.freedesktop.org/~agd5f/linux: (23 commits)
  drm/amd/powerplay: fix bug get wrong evv voltage of Polaris.
  drm/amdgpu/si_dpm: workaround for SI kickers
  drm/radeon/si_dpm: workaround for SI kickers
  drm/amdgpu: fix s3 resume back, uvd dpm randomly can't disable.
  drm/radeon: drop register readback in cayman_cp_int_cntl_setup
  drm/amdgpu/vce3: only enable 3 rings on new enough firmware (v2)
  drm/amdgpu: fix fence slab teardown
  drm/amdgpu: update kernel-doc for some functions
  drm/amdgpu: fix a vm_flush fence leak
  drm/amdgpu: fix sched fence slab teardown
  Revert "drm/radeon: fix DP link training issue with second 4K monitor"
  drm/amdgpu/dpm: flush any thermal work on fini
  drm/amdgpu: cancel reset work on fini
  drm/amd/powerplay: don't give up if DPM is already running
  drm/amd/powerplay: fix static checker warning in process_pptables_v1_0.c
  drm/amdgpu: avoid drm error log during S3 on RHEL7.3
  drm/amdgpu: explicitly set pg_flags for ST
  drm/amdgpu/st: move ATC CG golden init from gfx to mc
  drm/amd/amdgpu: expose max engine and memory clock for powerplay enabled case
  drm/amdgpu: move atom scratch register save/restore to common code
  ...
commit 1cfa126c52
@@ -519,7 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 				   &duplicates);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 		goto error_free_pages;
 	}
 
@@ -1959,6 +1959,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	/* evict remaining vram memory */
 	amdgpu_bo_evict_vram(adev);
 
+	amdgpu_atombios_scratch_regs_save(adev);
 	pci_save_state(dev->pdev);
 	if (suspend) {
 		/* Shut down the device */
@@ -2010,6 +2011,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 			return r;
 		}
 	}
+	amdgpu_atombios_scratch_regs_restore(adev);
 
 	/* post card */
 	if (!amdgpu_card_posted(adev) || !resume) {
@@ -2268,8 +2270,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	}
 
 	if (need_full_reset) {
-		/* save scratch */
-		amdgpu_atombios_scratch_regs_save(adev);
 		r = amdgpu_suspend(adev);
 
 retry:
@@ -2279,8 +2279,9 @@ retry:
 			amdgpu_display_stop_mc_access(adev, &save);
 			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 		}
-
+		amdgpu_atombios_scratch_regs_save(adev);
 		r = amdgpu_asic_reset(adev);
+		amdgpu_atombios_scratch_regs_restore(adev);
 		/* post card */
 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
@@ -2288,8 +2289,6 @@ retry:
 			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
 			r = amdgpu_resume(adev);
 		}
-		/* restore scratch */
-		amdgpu_atombios_scratch_regs_restore(adev);
 	}
 	if (!r) {
 		amdgpu_irq_gpu_reset_resume_helper(adev);
@@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void)
 
 void amdgpu_fence_slab_fini(void)
 {
+	rcu_barrier();
 	kmem_cache_destroy(amdgpu_fence_slab);
 }
 /*
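Both fence-slab teardown fixes in this pull ("fix fence slab teardown" here and "fix sched fence slab teardown" in amd_sched_fini() further down) apply the same rule: when objects are freed through call_rcu(), the cache must outlive every queued callback, so an rcu_barrier() has to precede kmem_cache_destroy(). A minimal sketch of the pattern, with a hypothetical obj cache standing in for the driver's fence slab:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct obj {
	struct rcu_head rcu;
};

static struct kmem_cache *obj_slab;	/* hypothetical cache */

static void obj_free_rcu(struct rcu_head *rcu)
{
	kmem_cache_free(obj_slab, container_of(rcu, struct obj, rcu));
}

static void obj_slab_fini(void)
{
	rcu_barrier();			/* drain every pending obj_free_rcu() callback */
	kmem_cache_destroy(obj_slab);	/* only now is the cache guaranteed idle */
}

Without the barrier, a callback that runs after kmem_cache_destroy() frees an object into a cache that no longer exists.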
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	if (r) {
 		adev->irq.installed = false;
 		flush_work(&adev->hotplug_work);
+		cancel_work_sync(&adev->reset_work);
 		return r;
 	}
 
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 		if (adev->irq.msi_enabled)
 			pci_disable_msi(adev->pdev);
 		flush_work(&adev->hotplug_work);
+		cancel_work_sync(&adev->reset_work);
 	}
 
 	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
@@ -459,10 +459,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		/* return all clocks in KHz */
 		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
 		if (adev->pm.dpm_enabled) {
-			dev_info.max_engine_clock =
-				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
-			dev_info.max_memory_clock =
-				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
+			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
 		} else {
 			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
 			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
@@ -1758,5 +1758,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 		fence_put(adev->vm_manager.ids[i].first);
 		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
 		fence_put(id->flushed_updates);
+		fence_put(id->last_flush);
 	}
 }
@@ -4075,7 +4075,8 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
 				pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
 		}
 	} else {
-		if (pi->last_mclk_dpm_enable_mask & 0x1) {
+		if (pi->uvd_enabled) {
+			pi->uvd_enabled = false;
 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
 			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -6236,6 +6236,8 @@ static int ci_dpm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	flush_work(&adev->pm.dpm.thermal.work);
+
 	mutex_lock(&adev->pm.mutex);
 	amdgpu_pm_sysfs_fini(adev);
 	ci_dpm_fini(adev);
@@ -3151,10 +3151,6 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v10_0_hw_fini(handle);
 }
 
@@ -3165,8 +3161,6 @@ static int dce_v10_0_resume(void *handle)
 
 	ret = dce_v10_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3215,10 +3215,6 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3229,8 +3225,6 @@ static int dce_v11_0_resume(void *handle)
 
 	ret = dce_v11_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -2482,10 +2482,6 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2496,8 +2492,6 @@ static int dce_v6_0_resume(void *handle)
 
 	ret = dce_v6_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3033,10 +3033,6 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -3047,8 +3043,6 @@ static int dce_v8_0_resume(void *handle)
 
 	ret = dce_v8_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
 	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
-	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
 
 static const u32 stoney_mgcg_cgcg_init[] =
 {
+	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
@@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	flush_work(&adev->pm.dpm.thermal.work);
+
 	mutex_lock(&adev->pm.mutex);
 	amdgpu_pm_sysfs_fini(adev);
 	kv_dpm_fini(adev);
@@ -3477,6 +3477,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 	int i;
 	struct si_dpm_quirk *p = si_dpm_quirk_list;
 
+	/* limit all SI kickers */
+	if (adev->asic_type == CHIP_PITCAIRN) {
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->device == 0x6810) ||
+		    (adev->pdev->device == 0x6811) ||
+		    (adev->pdev->device == 0x6816) ||
+		    (adev->pdev->device == 0x6817) ||
+		    (adev->pdev->device == 0x6806))
+			max_mclk = 120000;
+	} else if (adev->asic_type == CHIP_VERDE) {
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6820) ||
+		    (adev->pdev->device == 0x6821) ||
+		    (adev->pdev->device == 0x6822) ||
+		    (adev->pdev->device == 0x6823) ||
+		    (adev->pdev->device == 0x682A) ||
+		    (adev->pdev->device == 0x682B)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	} else if (adev->asic_type == CHIP_HAINAN) {
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0xC3) ||
+		    (adev->pdev->device == 0x6664) ||
+		    (adev->pdev->device == 0x6665) ||
+		    (adev->pdev->device == 0x6667)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
 		if (adev->pdev->vendor == p->chip_vendor &&
@@ -3489,22 +3532,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		}
 		++p;
 	}
-	/* limit mclk on all R7 370 parts for stability */
-	if (adev->pdev->device == 0x6811 &&
-	    adev->pdev->revision == 0x81)
-		max_mclk = 120000;
-	/* limit sclk/mclk on Jet parts for stability */
-	if (adev->pdev->device == 0x6665 &&
-	    adev->pdev->revision == 0xc3) {
-		max_sclk = 75000;
-		max_mclk = 80000;
-	}
-	/* Limit clocks for some HD8600 parts */
-	if (adev->pdev->device == 0x6660 &&
-	    adev->pdev->revision == 0x83) {
-		max_sclk = 75000;
-		max_mclk = 80000;
-	}
 
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
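The workaround replaces the one-off per-device clamps removed above with full per-family kicker tables; the max_sclk/max_mclk values they set are consumed later in si_apply_state_adjust_rules() when the state's clocks are capped. A simplified sketch of that consumption (illustrative only, not the verbatim driver code; a zero cap is assumed to mean "no limit"):

	if (max_sclk && sclk > max_sclk)
		sclk = max_sclk;
	if (max_mclk && mclk > max_mclk)
		mclk = max_mclk;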
@@ -7777,6 +7804,8 @@ static int si_dpm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	flush_work(&adev->pm.dpm.thermal.work);
+
 	mutex_lock(&adev->pm.mutex);
 	amdgpu_pm_sysfs_fini(adev);
 	si_dpm_fini(adev);
@@ -52,6 +52,8 @@
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
 #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
 
+#define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	/* 52.8.3 required for 3 ring support */
+	if (adev->vce.fw_version < FW_52_8_3)
+		adev->vce.num_rings = 2;
+
 	r = amdgpu_vce_resume(adev);
 	if (r)
 		return r;
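FW_52_8_3 packs major/minor/revision into one word (major in bits 31:24, minor in 23:16, revision in 15:8), so a single integer compare against adev->vce.fw_version orders firmware versions. A standalone illustration of why that works; the FW_VERSION helper and the "loaded" value are made up for the example:

#include <stdio.h>

#define FW_VERSION(maj, min, rev) (((maj) << 24) | ((min) << 16) | ((rev) << 8))

int main(void)
{
	unsigned int required = FW_VERSION(52, 8, 3);	/* mirrors FW_52_8_3 */
	unsigned int loaded   = FW_VERSION(52, 7, 0);	/* hypothetical older firmware */

	/* field order within the word makes numeric compare == version compare */
	printf("usable rings: %d\n", loaded < required ? 2 : 3);
	return 0;
}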
@@ -1651,7 +1651,7 @@ static int vi_common_early_init(void *handle)
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS |
 			AMD_CG_SUPPORT_VCE_MGCG;
-		adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
 			AMD_PG_SUPPORT_GFX_SMG |
 			AMD_PG_SUPPORT_GFX_PIPELINE |
 			AMD_PG_SUPPORT_UVD |
@@ -716,7 +716,7 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 			*voltage = 1150;
 	} else {
 		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
-		*voltage = (uint16_t)vol/100;
+		*voltage = (uint16_t)(vol/100);
 	}
 	return ret;
 }
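The Polaris EVV fix above is a precedence bug: a cast binds tighter than division, so (uint16_t)vol/100 truncates vol to 16 bits before dividing. A standalone demonstration; the value 115000 is a made-up raw reading, not taken from the driver:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vol = 115000;	/* hypothetical raw EVV value */

	uint16_t wrong = (uint16_t)vol / 100;	/* 115000 wraps to 49464, then /100 = 494  */
	uint16_t right = (uint16_t)(vol / 100);	/* 115000 / 100 = 1150, fits in 16 bits    */

	printf("wrong=%u right=%u\n", wrong, right);
	return 0;
}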
@@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 	if (0 != result)
 		return result;
 
-	*voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
+	*voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+				(&get_voltage_info_param_space))->ulVoltageLevel);
 
 	return result;
 }
@@ -1201,12 +1201,15 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
 static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
 {
 	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
-	const ATOM_Tonga_VCE_State_Table *vce_state_table =
-			(ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+	const ATOM_Tonga_VCE_State_Table *vce_state_table;
+
 
-	if (vce_state_table == NULL)
+	if (pp_table == NULL)
 		return 0;
 
+	vce_state_table = (void *)pp_table +
+			le16_to_cpu(pp_table->usVCEStateTableOffset);
+
 	return vce_state_table->ucNumEntries;
 }
 
@@ -1168,8 +1168,8 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 	tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
 	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
+			"DPM is already running",
+			);
 
 	if (smu7_voltage_control(hwmgr)) {
 		tmp_result = smu7_enable_voltage_control(hwmgr);
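PP_ASSERT_WITH_CODE takes a statement as its third argument and executes it when the condition fails, roughly like the sketch below (a simplified shape, not the exact powerplay definition). The "don't give up if DPM is already running" fix therefore changes the failure action from return 0, which aborted smu7_enable_dpm_tasks() outright, to an empty statement, so the function just logs and continues with the remaining setup.

#define PP_ASSERT_WITH_CODE(cond, msg, code)		\
	do {						\
		if (!(cond)) {				\
			pr_warn("%s\n", (msg));		\
			code;	/* e.g. "return 0" or nothing at all */ \
		}					\
	} while (0)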
@@ -2127,15 +2127,18 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
 }
 
 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
-			struct phm_clock_and_voltage_limits *tab)
+				  struct phm_clock_and_voltage_limits *tab)
 {
+	uint32_t vddc, vddci;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (tab) {
-		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
-					&data->vddc_leakage);
-		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
-					&data->vddci_leakage);
+		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
+						   &data->vddc_leakage);
+		tab->vddc = vddc;
+		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
+						   &data->vddci_leakage);
+		tab->vddci = vddci;
 	}
 
 	return 0;
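This hunk stops passing (uint32_t *)&tab->vddc into a helper that stores a full 32-bit value. If the struct field is narrower than 32 bits (the sketch below assumes 16-bit voltage fields, which is an assumption, not something this diff shows), the old cast made the store spill into the neighbouring member; the fix writes into a local uint32_t and narrows explicitly on assignment. A standalone illustration with hypothetical names:

#include <stdio.h>
#include <stdint.h>

struct limits {			/* hypothetical layout with 16-bit voltage fields */
	uint16_t vddc;
	uint16_t vddci;
};

static void patch_voltage(uint32_t *v)	/* stand-in for the leakage helper */
{
	*v = 0x1234;
}

int main(void)
{
	struct limits tab = { .vddc = 0, .vddci = 0xBEEF };
	uint32_t vddc;

	/* old style: a 4-byte store lands on two 2-byte fields (also an
	 * aliasing violation in strict C; shown only to expose the bug) */
	patch_voltage((uint32_t *)&tab.vddc);
	printf("cast write:  vddci=0x%04X (clobbered)\n", tab.vddci);

	/* new style: temporary plus explicit narrowing keeps vddci intact */
	tab.vddci = 0xBEEF;
	patch_voltage(&vddc);
	tab.vddc = (uint16_t)vddc;
	printf("local write: vddci=0x%04X (intact)\n", tab.vddci);
	return 0;
}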
@@ -645,6 +645,7 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	rcu_barrier();
 	if (atomic_dec_and_test(&sched_fence_slab_ref))
 		kmem_cache_destroy(sched_fence_slab);
 }
@@ -103,7 +103,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 }
 
 /**
- * amd_sched_fence_release - callback that fence can be freed
+ * amd_sched_fence_release_scheduled - callback that fence can be freed
  *
  * @fence: fence
  *
@@ -118,7 +118,7 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
 }
 
 /**
- * amd_sched_fence_release_scheduled - drop extra reference
+ * amd_sched_fence_release_finished - drop extra reference
  *
  * @f: fence
  *
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 			      int ring, u32 cp_int_cntl)
 {
-	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
-	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+	WREG32(SRBM_GFX_CNTL, RINGID(ring));
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 }
 
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 
 	tmp &= AUX_HPD_SEL(0x7);
 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
-	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+	tmp |= AUX_EN | AUX_LS_READ_EN;
 
 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
 
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 	int i;
 	struct si_dpm_quirk *p = si_dpm_quirk_list;
 
+	/* limit all SI kickers */
+	if (rdev->family == CHIP_PITCAIRN) {
+		if ((rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->device == 0x6810) ||
+		    (rdev->pdev->device == 0x6811) ||
+		    (rdev->pdev->device == 0x6816) ||
+		    (rdev->pdev->device == 0x6817) ||
+		    (rdev->pdev->device == 0x6806))
+			max_mclk = 120000;
+	} else if (rdev->family == CHIP_VERDE) {
+		if ((rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0x87) ||
+		    (rdev->pdev->device == 0x6820) ||
+		    (rdev->pdev->device == 0x6821) ||
+		    (rdev->pdev->device == 0x6822) ||
+		    (rdev->pdev->device == 0x6823) ||
+		    (rdev->pdev->device == 0x682A) ||
+		    (rdev->pdev->device == 0x682B)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	} else if (rdev->family == CHIP_OLAND) {
+		if ((rdev->pdev->revision == 0xC7) ||
+		    (rdev->pdev->revision == 0x80) ||
+		    (rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->device == 0x6604) ||
+		    (rdev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	} else if (rdev->family == CHIP_HAINAN) {
+		if ((rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0xC3) ||
+		    (rdev->pdev->device == 0x6664) ||
+		    (rdev->pdev->device == 0x6665) ||
+		    (rdev->pdev->device == 0x6667)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
 		if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 		++p;
 	}
-	/* limit mclk on all R7 370 parts for stability */
-	if (rdev->pdev->device == 0x6811 &&
-	    rdev->pdev->revision == 0x81)
-		max_mclk = 120000;
-	/* limit sclk/mclk on Jet parts for stability */
-	if (rdev->pdev->device == 0x6665 &&
-	    rdev->pdev->revision == 0xc3) {
-		max_sclk = 75000;
-		max_mclk = 80000;
-	}
 
 	if (rps->vce_active) {
 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;