Merge tag 'drm-fixes-5.4-2019-11-06' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
drm-fixes-5.4-2019-11-06:

amdgpu:
- Fix navi14 display issue root cause and revert workaround
- GPU reset scheduler interaction fix
- Fix fan boost on multi-GPU
- Gfx10 and sdma5 fixes for navi
- GFXOFF fix for renoir
- Add navi14 PCI ID
- GPUVM fix for arcturus

radeon:
- Port an SI power fix from amdgpu

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107032241.1021217-1-alexander.deucher@amd.com
commit ff9234583d
@@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 		}
 
-		for (i = 0; i < num_entities; i++)
-			drm_sched_entity_fini(&ctx->entities[0][i].entity);
+		for (i = 0; i < num_entities; i++) {
+			mutex_lock(&ctx->adev->lock_reset);
+			drm_sched_entity_fini(&ctx->entities[0][i].entity);
+			mutex_unlock(&ctx->adev->lock_reset);
+		}
 	}
 }
 
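The hunk above is the GPU reset scheduler interaction fix from the summary: each context's scheduler entities are now finalized while holding the device reset lock, so a concurrent GPU reset cannot race with a half-torn-down entity. A minimal sketch of the pattern, with hypothetical names (dev->reset_lock, entity_fini()) standing in for the driver's own:

#include <pthread.h>

struct device { pthread_mutex_t reset_lock; };
struct entity { int live; };

/* Hypothetical teardown helper: must not race with a device reset. */
static void entity_fini(struct entity *e) { e->live = 0; }

/* Finalize each entity under the reset lock, mirroring the
 * mutex_lock(&ctx->adev->lock_reset) / mutex_unlock() pairing in the hunk. */
static void ctx_fini_entities(struct device *dev, struct entity *ents, int n)
{
	for (int i = 0; i < n; i++) {
		pthread_mutex_lock(&dev->reset_lock);
		entity_fini(&ents[i]);
		pthread_mutex_unlock(&dev->reset_lock);
	}
}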
@@ -2885,6 +2885,13 @@ fence_driver_init:
 		DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
 	}
 
+	/*
+	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+	 * Otherwise the mgpu fan boost feature will be skipped due to the
+	 * gpu instance is counted less.
+	 */
+	amdgpu_register_gpu_instance(adev);
+
 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
 	 * explicit gating rather than handling it automatically.
 	 */
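This is the multi-GPU fan boost fix: GPU instance registration is moved earlier in device init so that, by the time fan boost is enabled, the just-probed GPU is already counted. A rough sketch of why the ordering matters, with an invented instance counter (the driver's real bookkeeping lives in its mgpu_info state, not shown in this diff):

/* Invented global for illustration only. */
static int num_gpu_instances;

static void register_gpu_instance(void) { num_gpu_instances++; }

static void enable_mgpu_fan_boost(void)
{
	/* Boosting fans across GPUs only makes sense with more than one;
	 * if this runs before the current device registered itself, the
	 * count comes up one short and the feature is silently skipped. */
	if (num_gpu_instances < 2)
		return;
	/* ... program boosted fan tables on each registered GPU ... */
}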
@@ -1016,6 +1016,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
@@ -289,6 +289,7 @@ struct amdgpu_gfx {
 	uint32_t			mec2_feature_version;
 	bool				mec_fw_write_wait;
 	bool				me_fw_write_wait;
+	bool				cp_fw_write_wait;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -190,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
-	amdgpu_register_gpu_instance(adev);
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
 	kfree(adev->gfx.rlc.register_list_format);
 }
 
+static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
+{
+	adev->gfx.cp_fw_write_wait = false;
+
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+	case CHIP_NAVI12:
+	case CHIP_NAVI14:
+		if ((adev->gfx.me_fw_version >= 0x00000046) &&
+		    (adev->gfx.me_feature_version >= 27) &&
+		    (adev->gfx.pfp_fw_version >= 0x00000068) &&
+		    (adev->gfx.pfp_feature_version >= 27) &&
+		    (adev->gfx.mec_fw_version >= 0x0000005b) &&
+		    (adev->gfx.mec_feature_version >= 27))
+			adev->gfx.cp_fw_write_wait = true;
+		break;
+	default:
+		break;
+	}
+
+	if (adev->gfx.cp_fw_write_wait == false)
+		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+			      GRBM requires 1-cycle delay in cp firmware\n");
+}
+
+
 static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
 {
 	const struct rlc_firmware_header_v2_1 *rlc_hdr;
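gfx_v10_0_check_fw_write_wait() gates a new packet format on CP firmware versions: only when the ME, PFP and MEC firmware and feature versions are all new enough does the driver set cp_fw_write_wait and use the combined write-then-wait packet; otherwise it warns once and keeps the slower fallback. A generic sketch of this version-gating idiom, with made-up fields (the thresholds are the ones from the hunk above):

#include <stdbool.h>
#include <stdio.h>

/* Made-up firmware version snapshot for illustration. */
struct fw_versions {
	unsigned me_ver, me_feat;
	unsigned pfp_ver, pfp_feat;
	unsigned mec_ver, mec_feat;
};

/* Return true only if every firmware blob meets its minimum; otherwise the
 * caller must fall back to the older, separate write + wait sequence. */
static bool fw_supports_write_wait(const struct fw_versions *v)
{
	bool ok = v->me_ver  >= 0x46 && v->me_feat  >= 27 &&
		  v->pfp_ver >= 0x68 && v->pfp_feat >= 27 &&
		  v->mec_ver >= 0x5b && v->mec_feat >= 27;

	if (!ok)
		fprintf(stderr, "old CP firmware: using write+wait fallback\n");
	return ok;
}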
@@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 		}
 	}
 
+	gfx_v10_0_check_fw_write_wait(adev);
 out:
 	if (err) {
 		dev_err(adev->dev,
@@ -4765,6 +4792,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
 }
 
+static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+						   uint32_t reg0, uint32_t reg1,
+						   uint32_t ref, uint32_t mask)
+{
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+	struct amdgpu_device *adev = ring->adev;
+	bool fw_version_ok = false;
+
+	fw_version_ok = adev->gfx.cp_fw_write_wait;
+
+	if (fw_version_ok)
+		gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+				       ref, mask, 0x20);
+	else
+		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+							   ref, mask);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 				      uint32_t me, uint32_t pipe,
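The ring callback above picks between two ways of doing "write reg0, then poll reg1 until it matches": a single firmware-assisted WAIT_REG_MEM packet that performs both steps, or the generic helper that emits a separate write and wait. The same select-or-fallback shape, reduced to plain C with stub emitters standing in for the real packet builders:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub emitters standing in for the real ring-packet builders. */
static void emit_write(uint32_t reg, uint32_t val)
{
	printf("WRITE  reg=0x%x val=0x%x\n", reg, val);
}

static void emit_poll(uint32_t reg, uint32_t ref, uint32_t mask)
{
	printf("POLL   reg=0x%x ref=0x%x mask=0x%x\n", reg, ref, mask);
}

static void emit_write_then_poll(uint32_t reg0, uint32_t reg1,
				 uint32_t ref, uint32_t mask)
{
	printf("WRWAIT reg0=0x%x reg1=0x%x ref=0x%x mask=0x%x\n",
	       reg0, reg1, ref, mask);
}

/* Write reg0 and wait on reg1: use the combined packet only when the
 * firmware advertises support, matching the branch in the hunk above. */
static void reg_write_reg_wait(bool fw_ok, uint32_t reg0, uint32_t reg1,
			       uint32_t ref, uint32_t mask)
{
	if (fw_ok) {
		emit_write_then_poll(reg0, reg1, ref, mask);
	} else {
		emit_write(reg0, ref);
		emit_poll(reg1, ref, mask);
	}
}

The GMC10 TLB-flush hunk further down switches to this callback, so the same firmware check decides how VM invalidation requests are emitted on the graphics rings.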
@@ -5155,6 +5200,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
 	.emit_tmz = gfx_v10_0_ring_emit_tmz,
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5188,6 +5234,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5218,6 +5265,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
 	.emit_rreg = gfx_v10_0_ring_emit_rreg,
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -973,6 +973,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	adev->gfx.me_fw_write_wait = false;
 	adev->gfx.mec_fw_write_wait = false;
 
+	if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+	    (adev->gfx.mec_feature_version < 46) ||
+	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
+	    (adev->gfx.pfp_feature_version < 46))
+		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+			      GRBM requires 1-cycle delay in cp firmware\n");
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -1039,6 +1046,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 		    !adev->gfx.rlc.is_rlc_v2_1))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
+		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_RLC_SMU_HS;
+		break;
+	case CHIP_RENOIR:
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
 				AMD_PG_SUPPORT_CP |
@@ -344,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 			      upper_32_bits(pd_addr));
 
-	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
-	/* wait for the invalidate to complete */
-	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
-				  1 << vmid, 1 << vmid);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+					    hub->vm_inv_eng0_ack + eng,
+					    req, 1 << vmid);
 
 	return pd_addr;
 }
@@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
 
 	tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
+	if (adev->gmc.translate_further) {
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+	} else {
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+	}
 	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
 
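This is the Arcturus GPUVM fix: the MMHUB L2 cache setup now honours adev->gmc.translate_further, picking a larger bank select and big-K fragment size when the page tables carry the extra translation level, bringing it in line with the other VM hubs. Assuming the usual encoding where the fragment field is a log2 count of 4 KiB pages (an inference, not stated in the hunk), the two settings work out as below:

#include <stdio.h>

/* Assumed encoding: fragment size field F means a fragment of (4 KiB << F). */
static unsigned long fragment_bytes(unsigned int field)
{
	return 4096UL << field;
}

int main(void)
{
	/* translate_further: BANK_SELECT 12, fragment field 9 -> 2 MiB   */
	/* default:           BANK_SELECT 9,  fragment field 6 -> 256 KiB */
	printf("translate_further fragment: %lu KiB\n", fragment_bytes(9) / 1024);
	printf("default fragment:           %lu KiB\n", fragment_bytes(6) / 1024);
	return 0;
}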
@@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }
 
+static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+						   uint32_t reg0, uint32_t reg1,
+						   uint32_t ref, uint32_t mask)
+{
+	amdgpu_ring_emit_wreg(ring, reg0, ref);
+	/* wait for a cycle to reset vm_inv_eng*_ack */
+	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
+	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
 static int sdma_v5_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
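SDMA has no firmware-assisted combined packet, so its version of the callback emits a write plus two polls: a dummy poll on reg0 that, per the code's own comment, just burns a cycle so the stale vm_inv_eng*_ack bits can clear, followed by the real poll on reg1. That is also why the next hunk doubles the REG_WAIT share of the ring's emit_frame_size budget, since each flush now carries two polls instead of one. A small sketch of the dword accounting, assuming the per-packet costs implied by the existing multipliers (3 dwords per register write, 6 per POLL_REGMEM wait):

#include <stdio.h>

/* Assumed per-packet sizes, inferred from the "* 3" and "* 6" multipliers
 * in the frame-size hunk below; treat them as illustrative. */
#define WREG_DWORDS 3
#define WAIT_DWORDS 6

int main(void)
{
	int old_flush = WREG_DWORDS + 1 * WAIT_DWORDS; /* write + single wait */
	int new_flush = WREG_DWORDS + 2 * WAIT_DWORDS; /* write + dummy + real wait */

	printf("dwords per write+wait before: %d, after: %d\n",
	       old_flush, new_flush);
	return 0;
}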
@@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
 		/* sdma_v5_0_ring_emit_vm_flush */
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
 		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
 	.emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 	.pad_ib = sdma_v5_0_ring_pad_ib,
 	.emit_wreg = sdma_v5_0_ring_emit_wreg,
 	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
 	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
 	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
 	.preempt_ib = sdma_v5_0_ring_preempt_ib,
@@ -1186,11 +1186,6 @@ static int soc15_common_early_init(void *handle)
 			AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG;
 		adev->external_rev_id = adev->rev_id + 0x91;
-
-		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-				AMD_PG_SUPPORT_CP |
-				AMD_PG_SUPPORT_RLC_SMU_HS;
 		break;
 	default:
 		/* FIXME: not supported yet */
@@ -2767,15 +2767,6 @@ void core_link_enable_stream(
 					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 					COLOR_DEPTH_UNDEFINED);
 
-		/* This second call is needed to reconfigure the DIG
-		 * as a workaround for the incorrect value being applied
-		 * from transmitter control.
-		 */
-		if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
-			stream->link->link_enc->funcs->setup(
-				stream->link->link_enc,
-				pipe_ctx->stream->signal);
-
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (pipe_ctx->stream->timing.flags.DSC) {
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
@@ -1107,6 +1107,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
 	if (!enc1)
 		return NULL;
 
+	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+		if (eng_id >= ENGINE_ID_DIGD)
+			eng_id++;
+	}
+
 	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
 				       &stream_enc_regs[eng_id],
 				       &se_shift, &se_mask);
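This is the root-cause fix for the Navi14 display issue named in the merge summary, paired with the revert of the DIG reconfiguration workaround in the dc_link.c hunk above: on the Navi14 revisions matched by ASICREV_IS_NAVI14_M the DIG D slot is skipped, so any stream encoder at or beyond ENGINE_ID_DIGD is bumped up one index before its register block is looked up. A standalone sketch of that skip-a-hole remapping, with made-up enum values:

#include <stdio.h>

/* Made-up engine IDs mirroring a DIGA..DIGE layout. */
enum eng_id { DIGA, DIGB, DIGC, DIGD, DIGE };

/* On a part without a usable DIG D, shift requested IDs up so DIGD resolves
 * to DIGE's register block and nothing ever touches the missing instance. */
static enum eng_id remap_no_digd(enum eng_id id)
{
	return (id >= DIGD) ? id + 1 : id;
}

int main(void)
{
	for (enum eng_id id = DIGA; id <= DIGD; id++)
		printf("requested %d -> used %d\n", id, remap_no_digd(id));
	return 0;
}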
@@ -205,7 +205,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
@@ -219,7 +219,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
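Both workload tables above carried the same slip: the COMPUTE power profile was mapped to the SMU's CUSTOM workload bit, so selecting the compute profile programmed the wrong workload policy. The fix points it at the COMPUTE bit. A toy version of such an enum-to-bit mapping table and lookup, with invented names:

#include <stdio.h>

/* Invented profile and workload-bit enums for illustration only. */
enum profile { PROF_VIDEO, PROF_VR, PROF_COMPUTE, PROF_CUSTOM, PROF_COUNT };
enum workload_bit { WL_VIDEO = 3, WL_VR = 4, WL_COMPUTE = 5, WL_CUSTOM = 6 };

/* One wrong entry here silently selects the wrong SMU workload;
 * PROF_COMPUTE must map to WL_COMPUTE, not WL_CUSTOM. */
static const enum workload_bit workload_map[PROF_COUNT] = {
	[PROF_VIDEO]   = WL_VIDEO,
	[PROF_VR]      = WL_VR,
	[PROF_COMPUTE] = WL_COMPUTE,
	[PROF_CUSTOM]  = WL_CUSTOM,
};

int main(void)
{
	printf("compute profile -> workload mask 0x%x\n",
	       1u << workload_map[PROF_COMPUTE]);
	return 0;
}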
@@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
 	case 0x682C:
 		si_pi->cac_weights = cac_weights_cape_verde_pro;
 		si_pi->dte_data = dte_data_sun_xt;
+		update_dte_from_pl2 = true;
 		break;
 	case 0x6825:
 	case 0x6827: