Merge tag 'amd-drm-next-5.17-2021-12-30' of ssh://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.17-2021-12-30:

amdgpu:
- Suspend/resume fixes
- Fence fix
- Misc code cleanups
- IP discovery fixes
- SRIOV fixes
- RAS fixes
- GMC 8 VRAM detection fix
- FRU fixes for Aldebaran
- Display fixes

amdkfd:
- SVM fixes
- IP discovery fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211230141032.613596-1-alexander.deucher@amd.com
This commit is contained in: commit cb6846fbb8
@@ -812,6 +812,7 @@ struct amd_powerplay {
 #define AMDGPU_RESET_MAGIC_NUM 64
 #define AMDGPU_MAX_DF_PERFMONS 4
+#define AMDGPU_PRODUCT_NAME_LEN 64
 
 struct amdgpu_device {
     struct device *dev;
     struct pci_dev *pdev;
@@ -1082,7 +1083,7 @@ struct amdgpu_device {
 
     /* Chip product information */
     char product_number[16];
-    char product_name[32];
+    char product_name[AMDGPU_PRODUCT_NAME_LEN];
     char serial[20];
 
     atomic_t throttling_logging_enabled;

@@ -721,13 +721,13 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
     return adev->have_atomics_support;
 }
 
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev)
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
 {
     struct ras_err_data err_data = {0, 0, 0, NULL};
 
     /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
     if (!adev->gmc.xgmi.connected_to_cpu)
-        amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
-    else
+        amdgpu_umc_poison_handler(adev, &err_data, reset);
+    else if (reset)
         amdgpu_amdkfd_gpu_reset(adev);
 }

@@ -296,7 +296,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
                       uint64_t *mmap_offset);
 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
                   struct tile_config *config);
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+                  bool reset);
 #if IS_ENABLED(CONFIG_HSA_AMD)
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,

@@ -166,7 +166,7 @@ int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
 
     lock_srbm(adev, mec, pipe, 0, 0);
 
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+    WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
         CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
         CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
@@ -279,7 +279,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
                lower_32_bits((uintptr_t)wptr));
         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
                upper_32_bits((uintptr_t)wptr));
-        WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+        WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
                (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
     }
@@ -488,13 +488,13 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
     uint32_t low, high;
 
     acquire_queue(adev, pipe_id, queue_id);
-    act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+    act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
     if (act) {
         low = lower_32_bits(queue_address >> 8);
         high = upper_32_bits(queue_address >> 8);
 
-        if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
-           high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+        if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+           high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
             retval = true;
     }
     release_queue(adev);
@@ -556,7 +556,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
 
     end_jiffies = (utimeout * HZ / 1000) + jiffies;
     while (true) {
-        temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+        temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
         if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
             break;
         if (time_after(jiffies, end_jiffies)) {
@@ -645,7 +645,7 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
     mutex_lock(&adev->grbm_idx_mutex);
 
     WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+    WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
 
     data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
         INSTANCE_BROADCAST_WRITES, 1);
@@ -722,7 +722,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
     pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
     queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
     soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
-    reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+    reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
              queue_slot);
     *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
     if (*wave_cnt != 0)
@@ -809,8 +809,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
         for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
 
             gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
-            queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
-                       mmSPI_CSQ_WF_ACTIVE_STATUS));
+            queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
 
             /*
              * Assumption: queue map encodes following schema: four
@@ -860,17 +859,17 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
     /*
      * Program TBA registers
      */
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+    WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO,
         lower_32_bits(tba_addr >> 8));
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+    WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI,
         upper_32_bits(tba_addr >> 8));
 
     /*
      * Program TMA registers
      */
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+    WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO,
         lower_32_bits(tma_addr >> 8));
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+    WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI,
         upper_32_bits(tma_addr >> 8));
 
     unlock_srbm(adev);

@@ -2317,6 +2317,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
         /* need to do gmc hw init early so we can allocate gpu mem */
         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+            /* Try to reserve bad pages early */
+            if (amdgpu_sriov_vf(adev))
+                amdgpu_virt_exchange_data(adev);
+
             r = amdgpu_device_vram_scratch_init(adev);
             if (r) {
                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
@@ -2348,7 +2352,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
     }
 
     if (amdgpu_sriov_vf(adev))
-        amdgpu_virt_init_data_exchange(adev);
+        amdgpu_virt_exchange_data(adev);
 
     r = amdgpu_ib_pool_init(adev);
     if (r) {
@@ -2615,11 +2619,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
     if (r)
         DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
 
-    /* For XGMI + passthrough configuration on arcturus, enable light SBR */
-    if (adev->asic_type == CHIP_ARCTURUS &&
-        amdgpu_passthrough(adev) &&
-        adev->gmc.xgmi.num_physical_nodes > 1)
-        smu_set_light_sbr(&adev->smu, true);
+    /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
+    if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
+         adev->asic_type == CHIP_ALDEBARAN))
+        smu_handle_passthrough_sbr(&adev->smu, true);
 
     if (adev->gmc.xgmi.num_physical_nodes > 1) {
         mutex_lock(&mgpu_info.mutex);
@@ -3182,6 +3185,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
     switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+    case CHIP_HAINAN:
+#endif
+    case CHIP_TOPAZ:
+        /* chips with no display hardware */
+        return false;
 #if defined(CONFIG_DRM_AMD_DC)
     case CHIP_TAHITI:
     case CHIP_PITCAIRN:
@@ -3573,6 +3582,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     if (r)
         return r;
 
+    /* Need to get xgmi info early to decide the reset behavior */
+    if (adev->gmc.xgmi.supported) {
+        r = adev->gfxhub.funcs->get_xgmi_info(adev);
+        if (r)
+            return r;
+    }
+
     /* enable PCIE atomic ops */
     if (amdgpu_sriov_vf(adev))
         adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
@@ -3885,11 +3901,14 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 
     amdgpu_irq_fini_hw(adev);
 
     if (adev->mman.initialized)
         ttm_device_clear_dma_mappings(&adev->mman.bdev);
 
+    amdgpu_gart_dummy_page_fini(adev);
+
     if (drm_dev_is_unplugged(adev_to_drm(adev)))
         amdgpu_device_unmap_mmio(adev);
 
 }
 
 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
@@ -4507,7 +4526,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                  struct amdgpu_reset_context *reset_context)
 {
-    int i, j, r = 0;
+    int i, r = 0;
     struct amdgpu_job *job = NULL;
     bool need_full_reset =
         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4529,15 +4548,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
         /*clear job fence from fence drv to avoid force_completion
          *leave NULL and vm flush fence in fence drv */
-        for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
-            struct dma_fence *old, **ptr;
-
-            ptr = &ring->fence_drv.fences[j];
-            old = rcu_dereference_protected(*ptr, 1);
-            if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
-                RCU_INIT_POINTER(*ptr, NULL);
-            }
-        }
+        amdgpu_fence_driver_clear_job_fences(ring);
+
         /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
         amdgpu_fence_driver_force_completion(ring);
     }

@@ -565,10 +565,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
     }
 }
 
+union gc_info {
+    struct gc_info_v1_0 v1;
+    struct gc_info_v2_0 v2;
+};
+
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 {
     struct binary_header *bhdr;
-    struct gc_info_v1_0 *gc_info;
+    union gc_info *gc_info;
 
     if (!adev->mman.discovery_bin) {
         DRM_ERROR("ip discovery uninitialized\n");
@@ -576,28 +581,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
     }
 
     bhdr = (struct binary_header *)adev->mman.discovery_bin;
-    gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+    gc_info = (union gc_info *)(adev->mman.discovery_bin +
             le16_to_cpu(bhdr->table_list[GC].offset));
 
-    adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
-    adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
-                          le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
-    adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
-    adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
-    adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
-    adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
-    adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
-    adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
-    adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
-    adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
-    adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
-    adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
-    adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
-    adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
-    adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
-                     le32_to_cpu(gc_info->gc_num_sa_per_se);
-    adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
-
+    switch (gc_info->v1.header.version_major) {
+    case 1:
+        adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+        adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+                              le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+        adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+        adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+        adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+        adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+        adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+        adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+        adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+        adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+        adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+        adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+        adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+        adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+        adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+                         le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+        adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+        break;
+    case 2:
+        adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+        adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+        adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+        adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+        adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+        adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+        adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+        adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+        adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+        adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+        adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+        adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+        adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+        adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+        adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+                         le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+        adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+        break;
+    default:
+        dev_err(adev->dev,
+            "Unhandled GC info table %d.%d\n",
+            gc_info->v1.header.version_major,
+            gc_info->v1.header.version_minor);
+        return -EINVAL;
+    }
     return 0;
 }

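The hunk above is a common pattern for self-describing firmware tables: a union of per-version structs over one blob, dispatched on a shared header. Below is a minimal, self-contained C sketch of the same idea; the types, field names, and values are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct info_header { uint32_t version_major, version_minor; };
struct info_v1 { struct info_header header; uint32_t num_se; };
struct info_v2 { struct info_header header; uint32_t num_se, num_sh_per_se; };

/* All versions share the leading header, so v1 is always safe to read
 * for the version dispatch itself. */
union info { struct info_v1 v1; struct info_v2 v2; };

static int parse_info(const union info *info, uint32_t *num_se)
{
    switch (info->v1.header.version_major) {
    case 1:
        *num_se = info->v1.num_se;
        return 0;
    case 2:
        *num_se = info->v2.num_se;
        return 0;
    default:
        fprintf(stderr, "Unhandled info table %u.%u\n",
                info->v1.header.version_major,
                info->v1.header.version_minor);
        return -1;
    }
}

int main(void)
{
    union info blob;
    uint32_t num_se;

    memset(&blob, 0, sizeof(blob));
    blob.v2.header = (struct info_header){ 2, 0 };
    blob.v2.num_se = 4;
    if (!parse_info(&blob, &num_se))
        printf("num_se = %u\n", num_se);
    return 0;
}

The payoff of this design is that adding a v3 table touches only the union and one new switch arm, while the unknown-version case fails loudly instead of misreading fields.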
@@ -992,7 +1024,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
         amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
         break;
     default:
-        break;;
+        break;
     }
     return 0;
 }

@@ -330,10 +330,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
 
 /**
  * DOC: runpm (int)
- * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
- * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
  */
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
 /**
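As a usage illustration (standard kernel module-parameter syntax, not part of this change): the new auto default can be overridden at boot with "amdgpu.runpm=0" on the kernel command line, or at load time with "modprobe amdgpu runpm=0".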
@@ -2168,7 +2169,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
     adev->in_s3 = true;
     r = amdgpu_device_suspend(drm_dev, true);
     adev->in_s3 = false;
-
+    if (r)
+        return r;
+    if (!adev->in_s0ix)
+        r = amdgpu_asic_reset(adev);
     return r;
 }

@@ -2249,12 +2253,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
     if (amdgpu_device_supports_px(drm_dev))
         drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
+    /*
+     * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
+     * proper cleanups and put itself into a state ready for PNP. That
+     * can address some random resuming failure observed on BOCO capable
+     * platforms.
+     * TODO: this may be also needed for PX capable platform.
+     */
+    if (amdgpu_device_supports_boco(drm_dev))
+        adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
     ret = amdgpu_device_suspend(drm_dev, false);
     if (ret) {
         adev->in_runpm = false;
+        if (amdgpu_device_supports_boco(drm_dev))
+            adev->mp1_state = PP_MP1_STATE_NONE;
         return ret;
     }
 
+    if (amdgpu_device_supports_boco(drm_dev))
+        adev->mp1_state = PP_MP1_STATE_NONE;
+
     if (amdgpu_device_supports_px(drm_dev)) {
         /* Only need to handle PCI state in the driver for ATPX
          * PCI core handles it for _PR3.

@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
  * Cast helper
  */
 static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
     struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
-    if (__f->base.ops == &amdgpu_fence_ops)
+    if (__f->base.ops == &amdgpu_fence_ops ||
+        __f->base.ops == &amdgpu_job_fence_ops)
         return __f;
 
     return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
     }
 
     seq = ++ring->fence_drv.sync_seq;
-    if (job != NULL && job->job_run_counter) {
+    if (job && job->job_run_counter) {
         /* reinit seq for resubmitted jobs */
         fence->seqno = seq;
     } else {
-        dma_fence_init(fence, &amdgpu_fence_ops,
-                &ring->fence_drv.lock,
-                adev->fence_context + ring->idx,
-                seq);
-    }
-
-    if (job != NULL) {
-        /* mark this fence has a parent job */
-        set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+        if (job)
+            dma_fence_init(fence, &amdgpu_job_fence_ops,
+                       &ring->fence_drv.lock,
+                       adev->fence_context + ring->idx, seq);
+        else
+            dma_fence_init(fence, &amdgpu_fence_ops,
+                       &ring->fence_drv.lock,
+                       adev->fence_context + ring->idx, seq);
     }
 
     amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
     }
 }
 
+/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
+ *
+ * @ring: fence of the ring to be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+    int i;
+    struct dma_fence *old, **ptr;
+
+    for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+        ptr = &ring->fence_drv.fences[i];
+        old = rcu_dereference_protected(*ptr, 1);
+        if (old && old->ops == &amdgpu_job_fence_ops)
+            RCU_INIT_POINTER(*ptr, NULL);
+    }
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-    struct amdgpu_ring *ring;
+    return (const char *)to_amdgpu_fence(f)->ring->name;
+}
 
-    if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-        struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+    struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-        ring = to_amdgpu_ring(job->base.sched);
-    } else {
-        ring = to_amdgpu_fence(f)->ring;
-    }
-    return (const char *)ring->name;
+    return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
 
 /**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-    struct amdgpu_ring *ring;
+    if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+        amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
 
-    if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-        struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+    return true;
+}
 
-        ring = to_amdgpu_ring(job->base.sched);
-    } else {
-        ring = to_amdgpu_fence(f)->ring;
-    }
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is the similar function with amdgpu_fence_enable_signaling above, it
+ * only handles the job embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+    struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-    if (!timer_pending(&ring->fence_drv.fallback_timer))
-        amdgpu_fence_schedule_fallback(ring);
+    if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+        amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
 
     return true;
 }
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 {
     struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
-    if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-        /* free job if fence has a parent job */
-        struct amdgpu_job *job;
-
-        job = container_of(f, struct amdgpu_job, hw_fence);
-        kfree(job);
-    } else {
-        /* free fence_slab if it's separated fence*/
-        struct amdgpu_fence *fence;
-
-        fence = to_amdgpu_fence(f);
-        kmem_cache_free(amdgpu_fence_slab, fence);
-    }
+    /* free fence_slab if it's separated fence*/
+    kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
+
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+    struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+    /* free job if fence has a parent job */
+    kfree(container_of(f, struct amdgpu_job, hw_fence));
 }
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
     call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is the similar function with amdgpu_fence_release above, it
+ * only handles the job embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+    call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
 static const struct dma_fence_ops amdgpu_fence_ops = {
     .get_driver_name = amdgpu_fence_get_driver_name,
     .get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
     .release = amdgpu_fence_release,
 };
 
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+    .get_driver_name = amdgpu_fence_get_driver_name,
+    .get_timeline_name = amdgpu_job_fence_get_timeline_name,
+    .enable_signaling = amdgpu_job_fence_enable_signaling,
+    .release = amdgpu_job_fence_release,
+};
+
 /*
  * Fence debugfs
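The fence change above replaces a per-fence flag bit with a second ops table: the ops pointer both drives behavior and identifies the containing object. A minimal, self-contained C sketch of that dispatch style follows; the struct and function names are illustrative stand-ins, not the driver's.

#include <stdio.h>
#include <stddef.h>

struct fence;
struct fence_ops { const char *(*timeline_name)(struct fence *f); };
struct fence { const struct fence_ops *ops; };

struct ring_fence { struct fence base; const char *ring_name; };
struct job { const char *sched_name; struct fence hw_fence; };

static const char *ring_timeline(struct fence *f)
{
    /* base is the first member, so the cast recovers the container */
    struct ring_fence *rf = (struct ring_fence *)f;
    return rf->ring_name;
}

static const char *job_timeline(struct fence *f)
{
    /* container_of-style recovery of the embedding job */
    struct job *j = (struct job *)((char *)f - offsetof(struct job, hw_fence));
    return j->sched_name;
}

static const struct fence_ops ring_fence_ops = { ring_timeline };
static const struct fence_ops job_fence_ops = { job_timeline };

/* Accept either type, like to_amdgpu_fence() checking both ops tables. */
static int is_known_fence(struct fence *f)
{
    return f->ops == &ring_fence_ops || f->ops == &job_fence_ops;
}

int main(void)
{
    struct ring_fence rf = { { &ring_fence_ops }, "gfx_ring0" };
    struct job j = { "sched0", { &job_fence_ops } };

    printf("%s %s known=%d\n",
           rf.base.ops->timeline_name(&rf.base),
           j.hw_fence.ops->timeline_name(&j.hw_fence),
           is_known_fence(&j.hw_fence));
    return 0;
}

Keying the type off the ops pointer removes a race-prone flag bit and lets each callback assume its container type without runtime branching.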
@@ -56,6 +56,9 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
             return true;
         else
             return false;
+    case CHIP_ALDEBARAN:
+        /* All Aldebaran SKUs have the FRU */
+        return true;
     default:
         return false;
     }
@@ -88,13 +91,17 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
-    unsigned char buff[34];
+    unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2];
     u32 addrptr;
     int size, len;
+    int offset = 2;
 
     if (!is_fru_eeprom_supported(adev))
         return 0;
 
+    if (adev->asic_type == CHIP_ALDEBARAN)
+        offset = 0;
+
     /* If algo exists, it means that the i2c_adapter's initialized */
     if (!adev->pm.smu_i2c.algo) {
         DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
@@ -131,15 +138,13 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
     }
 
     len = size;
-    /* Product name should only be 32 characters. Any more,
-     * and something could be wrong. Cap it at 32 to be safe
-     */
-    if (len >= sizeof(adev->product_name)) {
-        DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-        len = sizeof(adev->product_name) - 1;
+    if (len >= AMDGPU_PRODUCT_NAME_LEN) {
+        DRM_WARN("FRU Product Name is larger than %d characters. This is likely a mistake",
+             AMDGPU_PRODUCT_NAME_LEN);
+        len = AMDGPU_PRODUCT_NAME_LEN - 1;
     }
     /* Start at 2 due to buff using fields 0 and 1 for the address */
-    memcpy(adev->product_name, &buff[2], len);
+    memcpy(adev->product_name, &buff[offset], len);
     adev->product_name[len] = '\0';
 
     addrptr += size + 1;
@@ -157,7 +162,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
         DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
         len = sizeof(adev->product_number) - 1;
     }
-    memcpy(adev->product_number, &buff[2], len);
+    memcpy(adev->product_number, &buff[offset], len);
     adev->product_number[len] = '\0';
 
     addrptr += size + 1;
@@ -184,7 +189,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
         DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
         len = sizeof(adev->serial) - 1;
     }
-    memcpy(adev->serial, &buff[2], len);
+    memcpy(adev->serial, &buff[offset], len);
     adev->serial[len] = '\0';
 
     return 0;

@@ -264,6 +264,9 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
         !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
         vma->vm_flags &= ~VM_MAYWRITE;
 
+    if (bo->kfd_bo)
+        vma->vm_flags |= VM_DONTCOPY;
+
     return drm_gem_ttm_mmap(obj, vma);
 }

@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT    (DMA_FENCE_FLAG_USER_BITS + 1)
-
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
     struct dma_fence **fences;
 };
 
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,

@@ -23,74 +23,10 @@
 
 #include "amdgpu_ras.h"
 
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
-{
-    int r;
-    struct ras_fs_if fs_info = {
-        .sysfs_name = "umc_err_count",
-    };
-    struct ras_ih_if ih_info = {
-        .cb = amdgpu_umc_process_ras_data_cb,
-    };
-
-    if (!adev->umc.ras_if) {
-        adev->umc.ras_if =
-            kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
-        if (!adev->umc.ras_if)
-            return -ENOMEM;
-        adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
-        adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
-        adev->umc.ras_if->sub_block_index = 0;
-    }
-    ih_info.head = fs_info.head = *adev->umc.ras_if;
-
-    r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
-                 &fs_info, &ih_info);
-    if (r)
-        goto free;
-
-    if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
-        r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
-        if (r)
-            goto late_fini;
-    } else {
-        r = 0;
-        goto free;
-    }
-
-    /* ras init of specific umc version */
-    if (adev->umc.ras_funcs &&
-        adev->umc.ras_funcs->err_cnt_init)
-        adev->umc.ras_funcs->err_cnt_init(adev);
-
-    return 0;
-
-late_fini:
-    amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
-free:
-    kfree(adev->umc.ras_if);
-    adev->umc.ras_if = NULL;
-    return r;
-}
-
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
-{
-    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
-        adev->umc.ras_if) {
-        struct ras_common_if *ras_if = adev->umc.ras_if;
-        struct ras_ih_if ih_info = {
-            .head = *ras_if,
-            .cb = amdgpu_umc_process_ras_data_cb,
-        };
-
-        amdgpu_ras_late_fini(adev, ras_if, &ih_info);
-        kfree(ras_if);
-    }
-}
-
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
-        void *ras_error_status,
-        struct amdgpu_iv_entry *entry)
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+        void *ras_error_status,
+        struct amdgpu_iv_entry *entry,
+        bool reset)
 {
     struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -164,6 +100,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
             adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
         }
 
-        amdgpu_ras_reset_gpu(adev);
+        if (reset)
+            amdgpu_ras_reset_gpu(adev);
     }
@@ -171,6 +108,100 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
     return AMDGPU_RAS_SUCCESS;
 }
 
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+        void *ras_error_status,
+        bool reset)
+{
+    int ret;
+    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+    struct ras_common_if head = {
+        .block = AMDGPU_RAS_BLOCK__UMC,
+    };
+    struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+    ret =
+        amdgpu_umc_do_page_retirement(adev, ras_error_status, NULL, reset);
+
+    if (ret == AMDGPU_RAS_SUCCESS && obj) {
+        obj->err_data.ue_count += err_data->ue_count;
+        obj->err_data.ce_count += err_data->ce_count;
+    }
+
+    return ret;
+}
+
+static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+        void *ras_error_status,
+        struct amdgpu_iv_entry *entry)
+{
+    return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
+}
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+{
+    int r;
+    struct ras_fs_if fs_info = {
+        .sysfs_name = "umc_err_count",
+    };
+    struct ras_ih_if ih_info = {
+        .cb = amdgpu_umc_process_ras_data_cb,
+    };
+
+    if (!adev->umc.ras_if) {
+        adev->umc.ras_if =
+            kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+        if (!adev->umc.ras_if)
+            return -ENOMEM;
+        adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
+        adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+        adev->umc.ras_if->sub_block_index = 0;
+    }
+    ih_info.head = fs_info.head = *adev->umc.ras_if;
+
+    r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
+                 &fs_info, &ih_info);
+    if (r)
+        goto free;
+
+    if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+        r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+        if (r)
+            goto late_fini;
+    } else {
+        r = 0;
+        goto free;
+    }
+
+    /* ras init of specific umc version */
+    if (adev->umc.ras_funcs &&
+        adev->umc.ras_funcs->err_cnt_init)
+        adev->umc.ras_funcs->err_cnt_init(adev);
+
+    return 0;
+
+late_fini:
+    amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
+free:
+    kfree(adev->umc.ras_if);
+    adev->umc.ras_if = NULL;
+    return r;
+}
+
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+{
+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+        adev->umc.ras_if) {
+        struct ras_common_if *ras_if = adev->umc.ras_if;
+        struct ras_ih_if ih_info = {
+            .head = *ras_if,
+            .cb = amdgpu_umc_process_ras_data_cb,
+        };
+
+        amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+        kfree(ras_if);
+    }
+}
+
 int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
         struct amdgpu_irq_src *source,
         struct amdgpu_iv_entry *entry)
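The UMC refactor above is the classic "one static worker, thin public entry points" shape: all the knobs move into a private helper, and each caller-facing function pins its policy. A tiny, self-contained C sketch of that shape (function names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdbool.h>

static int do_page_retirement(void *err_data, void *iv_entry, bool reset)
{
    /* shared worker: the only place the real logic lives */
    printf("retire: err=%p entry=%p reset=%d\n", err_data, iv_entry, reset);
    return 0;
}

/* Poison path: the caller decides whether to reset. */
int poison_handler(void *err_data, bool reset)
{
    return do_page_retirement(err_data, NULL, reset);
}

/* Interrupt callback: always resets, matching the old behavior. */
static int process_ras_data_cb(void *err_data, void *iv_entry)
{
    return do_page_retirement(err_data, iv_entry, true);
}

int main(void)
{
    int dummy;
    poison_handler(&dummy, false);
    return process_ras_data_cb(&dummy, NULL);
}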
@@ -78,9 +78,9 @@ struct amdgpu_umc {
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
         void *ras_error_status,
-        struct amdgpu_iv_entry *entry);
+        bool reset);
 int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
         struct amdgpu_irq_src *source,
         struct amdgpu_iv_entry *entry);

@@ -622,17 +622,35 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
 
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 {
-    uint64_t bp_block_offset = 0;
-    uint32_t bp_block_size = 0;
-    struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
-
     adev->virt.fw_reserve.p_pf2vf = NULL;
     adev->virt.fw_reserve.p_vf2pf = NULL;
     adev->virt.vf2pf_update_interval_ms = 0;
 
-    if (adev->mman.fw_vram_usage_va != NULL) {
+    if (adev->bios != NULL) {
         adev->virt.vf2pf_update_interval_ms = 2000;
 
         adev->virt.fw_reserve.p_pf2vf =
             (struct amd_sriov_msg_pf2vf_info_header *)
+            (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+        amdgpu_virt_read_pf2vf_data(adev);
+    }
+
+    if (adev->virt.vf2pf_update_interval_ms != 0) {
+        INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+        schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+    }
+}
+
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+    uint64_t bp_block_offset = 0;
+    uint32_t bp_block_size = 0;
+    struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+    if (adev->mman.fw_vram_usage_va != NULL) {
+
+        adev->virt.fw_reserve.p_pf2vf =
+            (struct amd_sriov_msg_pf2vf_info_header *)
             (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
@@ -663,16 +681,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
             (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
 
         amdgpu_virt_read_pf2vf_data(adev);
-    }
 
-    if (adev->virt.vf2pf_update_interval_ms != 0) {
-        INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
-        schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
+        return;
     }
 }
 
-
 void amdgpu_detect_virtualization(struct amdgpu_device *adev)
 {
     uint32_t reg;

@@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
 void amdgpu_detect_virtualization(struct amdgpu_device *adev);

@@ -63,6 +63,13 @@
 #define mmGCEA_PROBE_MAP                        0x070c
 #define mmGCEA_PROBE_MAP_BASE_IDX               0
 
+#define GFX9_RLCG_GC_WRITE_OLD                  (0x8 << 28)
+#define GFX9_RLCG_GC_WRITE                      (0x0 << 28)
+#define GFX9_RLCG_GC_READ                       (0x1 << 28)
+#define GFX9_RLCG_VFGATE_DISABLED               0x4000000
+#define GFX9_RLCG_WRONG_OPERATION_TYPE          0x2000000
+#define GFX9_RLCG_NOT_IN_RANGE                  0x1000000
+
 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/vega10_me.bin");
@@ -739,7 +746,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
     mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
-static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
 {
     static void *scratch_reg0;
     static void *scratch_reg1;
@@ -748,21 +755,20 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
     static void *spare_int;
     static uint32_t grbm_cntl;
     static uint32_t grbm_idx;
-    uint32_t i = 0;
-    uint32_t retries = 50000;
+    u32 ret = 0;
+    u32 tmp;
 
     scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
     scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
-    scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
-    scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+    scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4;
+    scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4;
     spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
 
     grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
     grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
 
-    if (amdgpu_sriov_runtime(adev)) {
-        pr_err("shouldn't call rlcg write register during runtime\n");
-        return;
-    }
-
     if (offset == grbm_cntl || offset == grbm_idx) {
         if (offset == grbm_cntl)
             writel(v, scratch_reg2);
@@ -771,41 +777,95 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
 
         writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
     } else {
+        uint32_t i = 0;
+        uint32_t retries = 50000;
+
         /*
          * SCRATCH_REG0 = read/write value
          * SCRATCH_REG1[30:28] = command
          * SCRATCH_REG1[19:0] = address in dword
          * SCRATCH_REG1[26:24] = Error reporting
          */
         writel(v, scratch_reg0);
-        writel(offset | 0x80000000, scratch_reg1);
+        writel(offset | flag, scratch_reg1);
         writel(1, spare_int);
-        for (i = 0; i < retries; i++) {
-            u32 tmp;
 
+        for (i = 0; i < retries; i++) {
             tmp = readl(scratch_reg1);
-            if (!(tmp & 0x80000000))
+            if (!(tmp & flag))
                 break;
-
             udelay(10);
         }
-        if (i >= retries)
-            pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+
+        if (i >= retries) {
+            if (amdgpu_sriov_reg_indirect_gc(adev)) {
+                if (tmp & GFX9_RLCG_VFGATE_DISABLED)
+                    pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
+                else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE)
+                    pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
+                else if (tmp & GFX9_RLCG_NOT_IN_RANGE)
+                    pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
+                else
+                    pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
+            } else
+                pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
+        }
     }
 
+    ret = readl(scratch_reg0);
+
+    return ret;
+}
+
+static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
+                   int write, u32 *rlcg_flag)
+{
+
+    switch (hwip) {
+    case GC_HWIP:
+        if (amdgpu_sriov_reg_indirect_gc(adev)) {
+            *rlcg_flag = write ? GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ;
+
+            return true;
+        /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
+        } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
+            *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD;
+            return true;
+        }
+
+        break;
+    default:
+        return false;
+    }
+
+    return false;
+}
+
+static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
+{
+    u32 rlcg_flag;
+
+    if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
+        return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+    if (acc_flags & AMDGPU_REGS_NO_KIQ)
+        return RREG32_NO_KIQ(offset);
+    else
+        return RREG32(offset);
+}
+
 static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
-                u32 v, u32 acc_flags, u32 hwip)
+                u32 value, u32 acc_flags, u32 hwip)
 {
-    if ((acc_flags & AMDGPU_REGS_RLC) &&
-        amdgpu_sriov_fullaccess(adev)) {
-        gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
+    u32 rlcg_flag;
 
+    if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
+        gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag);
         return;
     }
 
     if (acc_flags & AMDGPU_REGS_NO_KIQ)
-        WREG32_NO_KIQ(offset, v);
+        WREG32_NO_KIQ(offset, value);
     else
-        WREG32(offset, v);
+        WREG32(offset, value);
 }
 
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
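The hunk above extends a scratch-register mailbox: the value goes in SCRATCH_REG0, the address plus a command code in SCRATCH_REG1, a doorbell interrupt wakes the RLC firmware, and the host polls until the command bits clear, decoding any error bits on timeout. Below is a toy, self-contained C model of that protocol with simulated registers and an inlined "firmware" step; the constants and names are illustrative, not the hardware's.

#include <stdint.h>
#include <stdio.h>

#define RLCG_BUSY_BITS   0xf0000000u /* command bits, cleared by firmware on ack */
#define ERR_VFGATE       0x4000000u
#define ERR_NOT_IN_RANGE 0x1000000u

static uint32_t scratch_reg0, scratch_reg1, spare_int;

static void firmware_ack(void) /* stands in for the RLC firmware */
{
    scratch_reg1 &= ~RLCG_BUSY_BITS; /* ack: clear command bits (may set error bits) */
}

static int rlcg_write(uint32_t offset, uint32_t v, uint32_t flag)
{
    uint32_t tmp = 0;
    int i, retries = 50000;

    scratch_reg0 = v;             /* SCRATCH_REG0 = value */
    scratch_reg1 = offset | flag; /* SCRATCH_REG1 = address | command */
    spare_int = 1;                /* doorbell to the firmware */
    (void)spare_int;

    firmware_ack();

    for (i = 0; i < retries; i++) {
        tmp = scratch_reg1;
        if (!(tmp & flag))        /* command bits cleared: done */
            break;
    }
    if (i >= retries) {
        if (tmp & ERR_VFGATE)
            fprintf(stderr, "vfgate disabled, reg 0x%05x\n", offset);
        else if (tmp & ERR_NOT_IN_RANGE)
            fprintf(stderr, "reg 0x%05x not in range\n", offset);
        else
            fprintf(stderr, "timeout, reg 0x%05x\n", offset);
        return -1;
    }
    return 0;
}

int main(void)
{
    return rlcg_write(0x1234, 0xdeadbeefu, 0x8u << 28 /* legacy write cmd */);
}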
@@ -5135,7 +5195,7 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
     if (amdgpu_sriov_is_pp_one_vf(adev))
         data = RREG32_NO_KIQ(reg);
     else
-        data = RREG32(reg);
+        data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
 
     data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
     data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
@@ -5191,6 +5251,7 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
     .start = gfx_v9_0_rlc_start,
     .update_spm_vmid = gfx_v9_0_update_spm_vmid,
     .sriov_wreg = gfx_v9_0_sriov_wreg,
+    .sriov_rreg = gfx_v9_0_sriov_rreg,
     .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
 };
@@ -5796,16 +5857,16 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
 
     switch (state) {
     case AMDGPU_IRQ_STATE_DISABLE:
-        mec_int_cntl = RREG32(mec_int_cntl_reg);
+        mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
         mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                          TIME_STAMP_INT_ENABLE, 0);
-        WREG32(mec_int_cntl_reg, mec_int_cntl);
+        WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
         break;
     case AMDGPU_IRQ_STATE_ENABLE:
-        mec_int_cntl = RREG32(mec_int_cntl_reg);
+        mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
         mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                          TIME_STAMP_INT_ENABLE, 1);
-        WREG32(mec_int_cntl_reg, mec_int_cntl);
+        WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
         break;
     default:
         break;

@@ -914,12 +914,6 @@ static int gmc_v10_0_sw_init(void *handle)
         return r;
     }
 
-    if (adev->gmc.xgmi.supported) {
-        r = adev->gfxhub.funcs->get_xgmi_info(adev);
-        if (r)
-            return r;
-    }
-
     r = gmc_v10_0_mc_init(adev);
     if (r)
         return r;

@@ -515,10 +515,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 {
     int r;
+    u32 tmp;
 
     adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
     if (!adev->gmc.vram_width) {
-        u32 tmp;
         int chansize, numchan;
 
         /* Get VRAM informations */
@@ -562,8 +562,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
         adev->gmc.vram_width = numchan * chansize;
     }
     /* size in MB on si */
-    adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-    adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+    tmp = RREG32(mmCONFIG_MEMSIZE);
+    /* some boards may have garbage in the upper 16 bits */
+    if (tmp & 0xffff0000) {
+        DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+        if (tmp & 0xffff)
+            tmp &= 0xffff;
+    }
+    adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+    adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
     if (!(adev->flags & AMD_IS_APU)) {
         r = amdgpu_device_resize_fb_bar(adev);
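This is the GMC 8 VRAM detection fix from the tag: some boards report garbage in the upper half of CONFIG_MEMSIZE, so the value is masked to 16 bits, but only when the low half is non-zero (a genuinely huge carveout with a zero low half is left alone). A self-contained C sketch of exactly that sanitization, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

static uint64_t vram_bytes_from_memsize(uint32_t tmp)
{
    /* some boards may have garbage in the upper 16 bits */
    if (tmp & 0xffff0000) {
        printf("Probable bad vram size: 0x%08x\n", tmp);
        if (tmp & 0xffff)
            tmp &= 0xffff; /* keep only the plausible low half */
    }
    /* register value is in MiB */
    return (uint64_t)tmp * 1024ULL * 1024ULL;
}

int main(void)
{
    /* 0x00010040: garbage upper bits, 64 MiB in the low half */
    printf("%llu MiB\n",
           (unsigned long long)(vram_bytes_from_memsize(0x00010040) >> 20));
    return 0;
}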
@@ -478,9 +478,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
             hub = &adev->vmhub[j];
             for (i = 0; i < 16; i++) {
                 reg = hub->vm_context0_cntl + i;
-                tmp = RREG32(reg);
+
+                if (j == AMDGPU_GFXHUB_0)
+                    tmp = RREG32_SOC15_IP(GC, reg);
+                else
+                    tmp = RREG32_SOC15_IP(MMHUB, reg);
+
                 tmp &= ~bits;
-                WREG32(reg, tmp);
+
+                if (j == AMDGPU_GFXHUB_0)
+                    WREG32_SOC15_IP(GC, reg, tmp);
+                else
+                    WREG32_SOC15_IP(MMHUB, reg, tmp);
             }
         }
         break;
@@ -489,9 +498,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
             hub = &adev->vmhub[j];
             for (i = 0; i < 16; i++) {
                 reg = hub->vm_context0_cntl + i;
-                tmp = RREG32(reg);
+
+                if (j == AMDGPU_GFXHUB_0)
+                    tmp = RREG32_SOC15_IP(GC, reg);
+                else
+                    tmp = RREG32_SOC15_IP(MMHUB, reg);
+
                 tmp |= bits;
-                WREG32(reg, tmp);
+
+                if (j == AMDGPU_GFXHUB_0)
+                    WREG32_SOC15_IP(GC, reg, tmp);
+                else
+                    WREG32_SOC15_IP(MMHUB, reg, tmp);
             }
         }
         break;
@@ -788,9 +806,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
     /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
     if (use_semaphore) {
         for (j = 0; j < adev->usec_timeout; j++) {
-            /* a read return value of 1 means semaphore acuqire */
-            tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                        hub->eng_distance * eng);
+            /* a read return value of 1 means semaphore acquire */
+            if (vmhub == AMDGPU_GFXHUB_0)
+                tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+            else
+                tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+
             if (tmp & 0x1)
                 break;
             udelay(1);
@@ -801,8 +822,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
     }
 
     do {
-        WREG32_NO_KIQ(hub->vm_inv_eng0_req +
-                  hub->eng_distance * eng, inv_req);
+        if (vmhub == AMDGPU_GFXHUB_0)
+            WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+        else
+            WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
 
         /*
          * Issue a dummy read to wait for the ACK register to
@@ -815,8 +838,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                   hub->eng_distance * eng);
 
         for (j = 0; j < adev->usec_timeout; j++) {
-            tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
-                        hub->eng_distance * eng);
+            if (vmhub == AMDGPU_GFXHUB_0)
+                tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+            else
+                tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+
             if (tmp & (1 << vmid))
                 break;
             udelay(1);
@@ -827,13 +853,16 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
     } while (inv_req);
 
     /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-    if (use_semaphore)
+    if (use_semaphore) {
         /*
          * add semaphore release after invalidation,
          * write with 0 means semaphore release
          */
-        WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                  hub->eng_distance * eng, 0);
+        if (vmhub == AMDGPU_GFXHUB_0)
+            WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+        else
+            WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+    }
 
     spin_unlock(&adev->gmc.invalidate_lock);
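The three hunks above only change which register accessor is used; the invalidation sequence itself is unchanged: acquire the per-engine semaphore by reading it, post the request, poll the ack bitmap for our vmid, then release the semaphore by writing 0. A toy, self-contained C model of that sequence with simulated registers (all names and the hardware step are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t sem_reg = 1, req_reg, ack_reg;

static void hardware_step(uint32_t vmid) /* stand-in for the VM hub */
{
    if (req_reg)
        ack_reg |= 1u << vmid; /* hub acks the flush */
}

static int flush_tlb(uint32_t vmid, int usec_timeout)
{
    int j;

    for (j = 0; j < usec_timeout; j++)
        if (sem_reg & 0x1) /* a read return value of 1 means semaphore acquire */
            break;
    if (j >= usec_timeout)
        return -1;

    req_reg = 1;
    hardware_step(vmid);

    for (j = 0; j < usec_timeout; j++)
        if (ack_reg & (1u << vmid))
            break;

    sem_reg = 0; /* write with 0 means semaphore release */
    return (j < usec_timeout) ? 0 : -1;
}

int main(void)
{
    printf("flush: %d\n", flush_tlb(3, 10));
    return 0;
}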
|
@ -1599,12 +1628,6 @@ static int gmc_v9_0_sw_init(void *handle)
|
|||
}
|
||||
adev->need_swiotlb = drm_need_swiotlb(44);
|
||||
|
||||
if (adev->gmc.xgmi.supported) {
|
||||
r = adev->gfxhub.funcs->get_xgmi_info(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
r = gmc_v9_0_mc_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
|
|
@ -277,69 +277,9 @@ static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
|
|||
return sol_reg != 0x0;
|
||||
}
|
||||
|
||||
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
uint32_t psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
|
||||
/* Check tOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
if (psp_v11_0_is_sos_alive(psp))
|
||||
return 0;
|
||||
|
||||
ret = psp_v11_0_wait_for_bootloader(psp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Copy PSP KDB binary to memory */
|
||||
psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes);
|
||||
|
||||
/* Provide the PSP KDB to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
(uint32_t)(psp->fw_pri_mc_addr >> 20));
|
||||
psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
|
||||
psp_gfxdrv_command_reg);
|
||||
|
||||
ret = psp_v11_0_wait_for_bootloader(psp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
uint32_t psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
|
||||
/* Check tOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
if (psp_v11_0_is_sos_alive(psp))
|
||||
return 0;
|
||||
|
||||
ret = psp_v11_0_wait_for_bootloader(psp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Copy PSP SPL binary to memory */
|
||||
psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes);
|
||||
|
||||
/* Provide the PSP SPL to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
(uint32_t)(psp->fw_pri_mc_addr >> 20));
|
||||
psp_gfxdrv_command_reg = PSP_BL__LOAD_TOS_SPL_TABLE;
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
|
||||
psp_gfxdrv_command_reg);
|
||||
|
||||
ret = psp_v11_0_wait_for_bootloader(psp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
|
||||
static int psp_v11_0_bootloader_load_component(struct psp_context *psp,
|
||||
struct psp_bin_desc *bin_desc,
|
||||
enum psp_bootloader_cmd bl_cmd)
|
||||
{
|
||||
int ret;
|
||||
uint32_t psp_gfxdrv_command_reg = 0;
|
||||
|
@@ -356,23 +296,35 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
         return ret;
 
     /* Copy PSP System Driver binary to memory */
-    psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);
+    psp_copy_fw(psp, bin_desc->start_addr, bin_desc->size_bytes);
 
     /* Provide the sys driver to bootloader */
     WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
            (uint32_t)(psp->fw_pri_mc_addr >> 20));
-    psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
+    psp_gfxdrv_command_reg = bl_cmd;
     WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
            psp_gfxdrv_command_reg);
 
     /* there might be handshake issue with hardware which needs delay */
     mdelay(20);
 
     ret = psp_v11_0_wait_for_bootloader(psp);
 
     return ret;
 }
 
+static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
+{
+    return psp_v11_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
+}
+
+static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
+{
+    return psp_v11_0_bootloader_load_component(psp, &psp->spl, PSP_BL__LOAD_TOS_SPL_TABLE);
+}
+
+static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
+{
+    return psp_v11_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
+}
+
 static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 {
     int ret;
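The PSP cleanup above collapses three near-identical bootloader paths into one helper parameterized by binary descriptor and command. A minimal, self-contained C sketch of the same refactor; all names are illustrative stand-ins:

#include <stdio.h>
#include <stddef.h>

enum bl_cmd { BL_LOAD_KEY_DATABASE, BL_LOAD_TOS_SPL_TABLE, BL_LOAD_SYSDRV };

struct bin_desc { const char *name; size_t size_bytes; };
struct psp_ctx { struct bin_desc kdb, spl, sys; };

static int load_component(struct psp_ctx *psp, struct bin_desc *bin, enum bl_cmd cmd)
{
    (void)psp; /* the real helper would also touch psp-wide state */
    /* copy firmware, write the command register, wait for the bootloader... */
    printf("load %s (%zu bytes) cmd=%d\n", bin->name, bin->size_bytes, cmd);
    return 0;
}

static int load_kdb(struct psp_ctx *psp)
{
    return load_component(psp, &psp->kdb, BL_LOAD_KEY_DATABASE);
}

static int load_spl(struct psp_ctx *psp)
{
    return load_component(psp, &psp->spl, BL_LOAD_TOS_SPL_TABLE);
}

static int load_sysdrv(struct psp_ctx *psp)
{
    return load_component(psp, &psp->sys, BL_LOAD_SYSDRV);
}

int main(void)
{
    struct psp_ctx psp = { { "kdb", 16 }, { "spl", 32 }, { "sys", 64 } };
    return load_kdb(&psp) || load_spl(&psp) || load_sysdrv(&psp);
}

The design win is that a future component needs only one new two-line wrapper, and any fix to the handshake sequence lands in a single place.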
|
@ -542,9 +542,6 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
|
|||
}
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
|
||||
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
|
||||
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
|
||||
if (enable && amdgpu_sdma_phase_quantum) {
|
||||
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
|
||||
phase_quantum);
|
||||
|
@@ -553,8 +550,14 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
             WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
                    phase_quantum);
         }
+
+        if (!amdgpu_sriov_vf(adev)) {
+            f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+            f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                    AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+        }
     }
 
 }
@ -576,11 +579,13 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
|
|||
sdma_v5_2_rlc_stop(adev);
|
||||
}
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
|
||||
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
|
||||
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -608,6 +613,7 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
|
|||
ring = &adev->sdma.instance[i].ring;
|
||||
wb_offset = (ring->rptr_offs * 4);
|
||||
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
|
@ -683,8 +689,11 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
|
|||
sdma_v5_2_ring_set_wptr(ring);
|
||||
|
||||
/* set minor_ptr_update to 0 after wptr programed */
|
||||
|
||||
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
|
||||
|
||||
/* SRIOV VF has no control of any of registers below */
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
/* set utc l1 enable flag always to 1 */
|
||||
temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
|
||||
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
|
||||
|
@ -708,7 +717,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
|
|||
SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
|
||||
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
/* unhalt engine */
|
||||
temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
|
||||
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
|
||||
|
@ -1436,13 +1444,14 @@ static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
|
|||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
u32 sdma_cntl;
|
||||
|
||||
u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
sdma_cntl = RREG32(reg_offset);
|
||||
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
|
||||
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
|
||||
WREG32(reg_offset, sdma_cntl);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
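A recurring shape in these sdma_v5_2 hunks: when running as an SR-IOV virtual function, the guest must not touch host-owned registers, so each read-modify-write gets wrapped in a bare-metal check. A compilable sketch of the guard, where is_sriov_vf() and the reg accessors are hypothetical stand-ins for amdgpu_sriov_vf() and RREG32/WREG32:

#include <stdbool.h>
#include <stdint.h>

static bool is_sriov_vf(void) { return false; }	/* stand-in for amdgpu_sriov_vf(adev) */
static uint32_t fake_reg;			/* stand-in register backing store */
static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* stand-in for the RREG32/REG_SET_FIELD(..., HALT, ...)/WREG32 sequence */
static void set_halt(bool halt)
{
	uint32_t v;

	if (is_sriov_vf())
		return;	/* the host PF owns this register; a VF must leave it alone */

	v = reg_read();
	v = halt ? (v | 1u) : (v & ~1u);
	reg_write(v);
}

int main(void)
{
	set_halt(false);	/* unhalt the engine, but only on bare metal */
	return 0;
}
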
@@ -51,6 +51,8 @@
#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)

#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP)

#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
	__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
			     AMDGPU_REGS_NO_KIQ, ip##_HWIP)

@@ -65,6 +67,9 @@
#define WREG32_SOC15_IP(ip, reg, value) \
	__WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)

#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \
	__WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)

#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
	__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
			     value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)

@@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool idle_work_unexecuted;

	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (idle_work_unexecuted) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
	}

	r = vcn_v1_0_hw_fini(adev);
	if (r)

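The vcn_v1_0 suspend fix keys off the return value of cancel_delayed_work_sync(): it returns true when the work item was still pending, i.e. the idle handler never ran and never powered the block down, so the suspend path has to drop the power itself. A sketch of the idiom with hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for cancel_delayed_work_sync(): true means the handler never ran */
static bool cancel_pending_idle_work(void) { return true; }
static bool dpm_enabled = true;
static void disable_power(void) { puts("power dropped by suspend path"); }

static void suspend_path(void)
{
	bool idle_work_unexecuted = cancel_pending_idle_work();

	/* the idle handler would normally power the block down; if it was
	 * cancelled before running, suspend must do that work instead */
	if (idle_work_unexecuted && dpm_enabled)
		disable_power();
}

int main(void)
{
	suspend_path();
	return 0;
}
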
@@ -59,11 +59,78 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
{
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	default:
		dev_warn(kfd_device,
			 "Default sdma queue per engine(8) is set due to "
			 "mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
			 sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}
}

static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	default:
		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
			"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}

static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;

@@ -75,16 +142,11 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		kfd->device_info.supports_cwsr = true;

		if ((sdma_version >= IP_VERSION(4, 0, 0) &&
		     sdma_version <= IP_VERSION(4, 2, 0)) ||
		     sdma_version == IP_VERSION(5, 2, 1) ||
		     sdma_version == IP_VERSION(5, 2, 3))
			kfd->device_info.num_sdma_queues_per_engine = 2;
		else
			kfd->device_info.num_sdma_queues_per_engine = 8;
		kfd_device_info_set_sdma_queue_num(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||

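kfd_device.c now keys SDMA queue count and interrupt class off the discovered IP version instead of asic_type. The IP_VERSION(major, minor, revision) macro packs the triple into one integer so it can be used directly as a case label; a small sketch of that packing (the shift widths here are illustrative; the kernel's definition lives in the soc15 headers):

#include <stdint.h>
#include <stdio.h>

/* illustrative packing: one comparable integer per (major, minor, rev) */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static int sdma_queues_per_engine(uint32_t sdma_version)
{
	switch (sdma_version) {
	case IP_VERSION(4, 1, 2):	/* one label per discovered IP, not per ASIC */
		return 2;
	default:			/* unknown IP: warn and pick a safe default */
		fprintf(stderr, "unknown SDMA IP 0x%x, assuming 8 queues\n",
			sdma_version);
		return 8;
	}
}

int main(void)
{
	printf("%d\n", sdma_queues_per_engine(IP_VERSION(4, 1, 2)));
	return 0;
}
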
@@ -47,7 +47,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
				uint32_t filter_param, bool reset);

static int map_queues_cpsch(struct device_queue_manager *dqm);

@@ -570,7 +570,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
@@ -1223,7 +1223,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
	}

	if (!dqm->is_hws_hang)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
	hanging = dqm->is_hws_hang || dqm->is_resetting;
	dqm->sched_running = false;

@@ -1419,7 +1419,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
				uint32_t filter_param, bool reset)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;

@@ -1432,7 +1432,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
		return retval;

	retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
			filter, filter_param, reset, 0);
	if (retval)
		return retval;

@@ -1476,6 +1476,21 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
	return retval;
}

/* only for compute queue */
static int reset_queues_cpsch(struct device_queue_manager *dqm,
			uint16_t pasid)
{
	int retval;

	dqm_lock(dqm);

	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
			pasid, true);

	dqm_unlock(dqm);
	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
@@ -1485,7 +1500,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
	if (retval)
		return retval;

@@ -1896,6 +1911,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		dqm->ops.get_wave_state = get_wave_state;
		dqm->ops.reset_queues = reset_queues_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */

@@ -81,6 +81,8 @@ struct device_process_node {
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 *
 * @reset_queues: reset queues which consume RAS poison
 */

struct device_queue_manager_ops {

@@ -134,6 +136,9 @@ struct device_queue_manager_ops {
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size);

	int (*reset_queues)(struct device_queue_manager *dqm,
				uint16_t pasid);
};

struct device_queue_manager_asic_ops {

@@ -89,6 +89,44 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20

static void event_interrupt_poison_consumption(struct kfd_dev *dev,
				uint16_t pasid, uint16_t source_id)
{
	int ret = -EINVAL;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	/* all queues of a process are unmapped at one time */
	if (atomic_read(&p->poison)) {
		kfd_unref_process(p);
		return;
	}

	atomic_set(&p->poison, 1);
	kfd_unref_process(p);

	switch (source_id) {
	case SOC15_INTSRC_SQ_INTERRUPT_MSG:
		if (dev->dqm->ops.reset_queues)
			ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
		break;
	case SOC15_INTSRC_SDMA_ECC:
	default:
		break;
	}

	kfd_signal_poison_consumed_event(dev, pasid);

	/* if resetting the queues succeeds, do page retirement without a
	 * gpu reset; if it fails, fall back to the gpu reset solution
	 */
	if (!ret)
		amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
	else
		amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
}

static bool event_interrupt_isr_v9(struct kfd_dev *dev,
					const uint32_t *ih_ring_entry,
					uint32_t *patched_ihre,
@@ -230,8 +268,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
					sq_intr_err);
				if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
					sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
					kfd_signal_poison_consumed_event(dev, pasid);
					amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev);
					event_interrupt_poison_consumption(dev, pasid, source_id);
					return;
				}
				break;
@@ -252,8 +289,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
		if (source_id == SOC15_INTSRC_SDMA_TRAP) {
			kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
		} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
			kfd_signal_poison_consumed_event(dev, pasid);
			amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev);
			event_interrupt_poison_consumption(dev, pasid, source_id);
			return;
		}
	} else if (client_id == SOC15_IH_CLIENTID_VMC ||

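The new poison-consumption path above follows a mark-once-then-escalate shape: flag the process as poisoned so repeated interrupts are ignored, try a per-PASID queue reset, and only request a full GPU reset when that fails. A standalone sketch of the control flow (all helpers are hypothetical):

#include <stdbool.h>

static int process_poisoned;	/* stand-in for atomic_t p->poison */

static bool mark_poisoned_once(void)
{
	if (process_poisoned)
		return false;	/* queues already torn down by an earlier event */
	process_poisoned = 1;
	return true;
}

static int reset_queues_by_pasid(unsigned int pasid) { (void)pasid; return 0; }
static void poison_handler(bool need_gpu_reset) { (void)need_gpu_reset; }

static void on_poison_consumed(unsigned int pasid)
{
	int ret;

	if (!mark_poisoned_once())
		return;

	ret = reset_queues_by_pasid(pasid);

	/* reset succeeded: page retirement alone is enough;
	 * reset failed: fall back to a full GPU reset */
	poison_handler(ret != 0);
}

int main(void)
{
	on_poison_consumed(42);
	return 0;
}
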
@@ -549,7 +549,7 @@ static void svm_migrate_page_free(struct page *page)

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref(svm_bo);
		svm_range_bo_unref_async(svm_bo);
	}
}

@@ -856,6 +856,8 @@ struct kfd_process {
	struct svm_range_list svms;

	bool xnack_enabled;

	atomic_t poison;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */

@@ -332,6 +332,8 @@ static void svm_range_bo_release(struct kref *kref)
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);

	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
@@ -365,11 +367,32 @@ static void svm_range_bo_release(struct kref *kref)
	kfree(svm_bo);
}

void svm_range_bo_unref(struct svm_range_bo *svm_bo)
static void svm_range_bo_wq_release(struct work_struct *work)
{
	if (!svm_bo)
		return;
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(work, struct svm_range_bo, release_work);
	svm_range_bo_release(&svm_bo->kref);
}

static void svm_range_bo_release_async(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);
	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
	schedule_work(&svm_bo->release_work);
}

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
	kref_put(&svm_bo->kref, svm_range_bo_release_async);
}

static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_put(&svm_bo->kref, svm_range_bo_release);
}

@@ -2254,8 +2277,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,

	start = mni->interval_tree.start;
	last = mni->interval_tree.last;
	start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
	last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
	start = max(start, range->start) >> PAGE_SHIFT;
	last = min(last, range->end - 1) >> PAGE_SHIFT;
	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
		 start, last, range->start >> PAGE_SHIFT,
		 (range->end - 1) >> PAGE_SHIFT,

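svm_range_bo_unref_async() exists because the final kref release of an SVM BO does heavyweight, potentially sleeping work, which is not allowed from every context that can hold the last reference. Deferring the release body to a work item is the standard escape hatch; a userspace-flavored sketch of the shape, with kref and work_struct replaced by hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;				/* stand-in for struct kref */
	void (*release_work)(struct object *);	/* stand-in for work_struct */
};

static void heavy_release(struct object *o)	/* may sleep in the kernel */
{
	puts("releasing in deferred context");
	free(o);
}

/* stand-in for schedule_work(): a real workqueue runs this later,
 * from a context where sleeping is allowed */
static void schedule_release(struct object *o)
{
	o->release_work = heavy_release;
	o->release_work(o);
}

static void put_async(struct object *o)
{
	/* a real kref_put() decrements atomically */
	if (--o->refcount == 0)
		schedule_release(o);	/* never release inline */
}

int main(void)
{
	struct object *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	put_async(o);
	return 0;
}
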
@@ -48,6 +48,7 @@ struct svm_range_bo {
	struct work_struct eviction_work;
	struct svm_range_list *svms;
	uint32_t evicting;
	struct work_struct release_work;
};

enum svm_work_list_ops {
@@ -195,7 +196,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
 */
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)

void svm_range_bo_unref(struct svm_range_bo *svm_bo);
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
#else

struct kfd_process;

@@ -741,7 +741,7 @@ void vg_clk_mgr_construct(
			sizeof(struct watermarks),
			&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (clk_mgr->smu_wm_set.wm_set == 0) {
	if (!clk_mgr->smu_wm_set.wm_set) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}

@@ -158,6 +158,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
		union display_idle_optimization_u idle_info = { 0 };
		idle_info.idle_info.df_request_disabled = 1;
		idle_info.idle_info.phy_ref_clk_off = 1;
		idle_info.idle_info.s0i2_rdy = 1;
		dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
		/* update power state */
		clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;

@@ -274,24 +274,6 @@ static bool create_links(
			goto failed_alloc;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
			dc->caps.dp_hpo &&
			link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
			/* FPGA case - Allocate HPO DP link encoder */
			if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
				link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];

				if (link->hpo_dp_link_enc == NULL) {
					BREAK_TO_DEBUGGER();
					goto failed_alloc;
				}
				link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
				link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
			}
		}
#endif

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;

@@ -422,6 +422,8 @@ char *dc_status_to_str(enum dc_status status)
		return "The operation is not supported.";
	case DC_UNSUPPORTED_VALUE:
		return "The value specified is not supported.";
	case DC_NO_LINK_ENC_RESOURCE:
		return "No link encoder resource";
	case DC_ERROR_UNEXPECTED:
		return "Unexpected error";
	}

@@ -66,31 +66,6 @@
/*******************************************************************************
 * Private functions
 ******************************************************************************/
#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
{
	struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
			link->dc->res_pool);

	if (!link->hpo_dp_link_enc && enc) {
		link->hpo_dp_link_enc = enc;
		link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
		link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
	}

	return (link->hpo_dp_link_enc != NULL);
}

static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
{
	if (link->hpo_dp_link_enc) {
		link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
		link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
		link->hpo_dp_link_enc = NULL;
	}
}
#endif

static void dc_link_destruct(struct dc_link *link)
{
	int i;

@@ -118,12 +93,6 @@ static void dc_link_destruct(struct dc_link *link)
		link->link_enc->funcs->destroy(&link->link_enc);
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (link->hpo_dp_link_enc) {
		remove_dp_hpo_link_encoder_from_link(link);
	}
#endif

	if (link->local_sink)
		dc_sink_release(link->local_sink);

@@ -881,6 +850,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
	enum dc_connection_type pre_connection_type = dc_connection_none;
	bool perform_dp_seamless_boot = false;
	const uint32_t post_oui_delay = 30; // 30ms
	struct link_resource link_res = { 0 };

	DC_LOGGER_INIT(link->ctx->logger);

@@ -975,7 +945,10 @@ static bool dc_link_detect_helper(struct dc_link *link,

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
			add_dp_hpo_link_encoder_to_link(link);
			link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt(
					&link->dc->current_state->res_ctx,
					link->dc->res_pool,
					link);
#endif

		if (link->type == dc_connection_mst_branch) {
@@ -986,7 +959,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
			 * empty which leads to allocate_mst_payload() has "0"
			 * pbn_per_slot value leading to exception on dc_fixpt_div()
			 */
			dp_verify_mst_link_cap(link);
			dp_verify_mst_link_cap(link, &link_res);

			/*
			 * This call will initiate MST topology discovery. Which
@@ -1150,6 +1123,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
			// verify link cap for SST non-seamless boot
			if (!perform_dp_seamless_boot)
				dp_verify_link_cap_with_retries(link,
								&link_res,
								&link->reported_link_cap,
								LINK_TRAINING_MAX_VERIFY_RETRY);
		} else {
@@ -1844,6 +1818,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
		union down_spread_ctrl old_downspread;
		union down_spread_ctrl new_downspread;

		memset(&old_downspread, 0, sizeof(old_downspread));

		core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
				&old_downspread.raw, sizeof(old_downspread));

@@ -2503,7 +2479,8 @@ static void write_i2c_redriver_setting(
		DC_LOG_DEBUG("Set redriver failed");
}

static void disable_link(struct dc_link *link, enum signal_type signal)
static void disable_link(struct dc_link *link, const struct link_resource *link_res,
		enum signal_type signal)
{
	/*
	 * TODO: implement call for dp_set_hw_test_pattern
@@ -2522,20 +2499,20 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
		struct dc_link_settings link_settings = link->cur_link_settings;
#endif
		if (dc_is_dp_sst_signal(signal))
			dp_disable_link_phy(link, signal);
			dp_disable_link_phy(link, link_res, signal);
		else
			dp_disable_link_phy_mst(link, signal);
			dp_disable_link_phy_mst(link, link_res, signal);

		if (dc_is_dp_sst_signal(signal) ||
				link->mst_stream_alloc_table.stream_count == 0) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
				dp_set_fec_enable(link, false);
				dp_set_fec_ready(link, false);
				dp_set_fec_ready(link, link_res, false);
			}
#else
			dp_set_fec_enable(link, false);
			dp_set_fec_ready(link, false);
			dp_set_fec_ready(link, link_res, false);
#endif
		}
	} else {
@@ -2646,7 +2623,7 @@ static enum dc_status enable_link(
	 * new link settings.
	 */
	if (link->link_status.link_active) {
		disable_link(link, pipe_ctx->stream->signal);
		disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
	}

	switch (pipe_ctx->stream->signal) {
@@ -3421,7 +3398,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
	struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
	struct link_mst_stream_allocation_table proposed_table = {0};
	struct fixed31_32 avg_time_slots_per_mtp;
@@ -3503,7 +3480,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
	struct link_encoder *link_encoder = NULL;
	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
	struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
#endif
	struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -3833,7 +3810,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
	struct link_encoder *link_encoder = NULL;
	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
	struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
#endif
	struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -4012,9 +3989,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)

	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
			pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
		link_enc = pipe_ctx->stream->link->link_enc;
		config.dio_output_type = pipe_ctx->stream->link->ep_type;
		config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
		if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
			link_enc = pipe_ctx->stream->link->link_enc;
		else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
@@ -4081,7 +4055,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
		config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
	if (is_dp_128b_132b_signal(pipe_ctx)) {
		config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
		config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst;
		config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
		config.dp2_enabled = 1;
	}
#endif
@@ -4112,7 +4087,7 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
	stream->link->cur_link_settings = link_settings;

	/* Enable clock, Configure lane count, and Enable Link Encoder*/
	enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings);
	enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings);

#ifdef DIAGS_BUILD
	/* Workaround for FPGA HPO capture DP link data:
@@ -4162,12 +4137,12 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
		proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
	}

	stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table(
			stream->link->hpo_dp_link_enc,
	pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table(
			pipe_ctx->link_res.hpo_dp_link_enc,
			&proposed_table);

	stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size(
			stream->link->hpo_dp_link_enc,
	pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size(
			pipe_ctx->link_res.hpo_dp_link_enc,
			pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
			avg_time_slots_per_mtp);

@@ -4356,7 +4331,8 @@ void core_link_enable_stream(
		if (status != DC_FAIL_DP_LINK_TRAINING ||
				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			if (false == stream->link->link_status.link_active)
				disable_link(stream->link, pipe_ctx->stream->signal);
				disable_link(stream->link, &pipe_ctx->link_res,
						pipe_ctx->stream->signal);
			BREAK_TO_DEBUGGER();
			return;
		}
@@ -4505,14 +4481,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
		 * state machine.
		 * In DP2 or MST mode, our encoder will stay video active
		 */
		disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
		dc->hwss.disable_stream(pipe_ctx);
	} else {
		dc->hwss.disable_stream(pipe_ctx);
		disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
	}
#else
	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
	disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);

	dc->hwss.disable_stream(pipe_ctx);
#endif
@@ -4595,16 +4571,22 @@ void dc_link_set_drive_settings(struct dc *dc,
{

	int i;
	struct pipe_ctx *pipe = NULL;
	const struct link_resource *link_res;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] == link)
	link_res = dc_link_get_cur_link_res(link);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream && pipe->stream->link) {
			if (pipe->stream->link == link)
				break;
		}

	if (i >= dc->link_count)
	}
	if (pipe && link_res)
		dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings);
	else
		ASSERT_CRITICAL(false);

	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}

void dc_link_set_preferred_link_settings(struct dc *dc,
@@ -4665,11 +4647,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
	if (link_setting != NULL) {
		link->preferred_link_setting = *link_setting;
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dp_get_link_encoding_format(link_setting) ==
				DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) {
			if (!add_dp_hpo_link_encoder_to_link(link))
				memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting));
		}
		if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
			/* TODO: add dc update for acquiring link res */
			skip_immediate_retrain = true;
#endif
	} else {
		link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
@@ -4796,6 +4776,9 @@ void dc_link_overwrite_extended_receiver_cap(

bool dc_link_is_fec_supported(const struct dc_link *link)
{
	/* TODO - use asic cap instead of link_enc->features
	 * we no longer know which link enc to use for this link before commit
	 */
	struct link_encoder *link_enc = NULL;

	/* Links supporting dynamically assigned link encoder will be assigned next
@@ -4890,3 +4873,125 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
	return kbps;

}

const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link)
{
	int i;
	struct pipe_ctx *pipe = NULL;
	const struct link_resource *link_res = NULL;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
			if (pipe->stream->link == link) {
				link_res = &pipe->link_res;
				break;
			}
		}
	}

	return link_res;
}

/**
 * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state
 * @dc: pointer to dc of the dm calling this
 * @map: a dc link resource snapshot defined internally to dc.
 *
 * DM needs to capture a snapshot of current link resource allocation mapping
 * and store it in its persistent storage.
 *
 * Some of the link resources are allocated on a first-come, first-served
 * basis. The allocation mapping depends on the original hotplug order, and
 * this information is lost the next time the driver loads. The snapshot is
 * used to restore link resources to their previous state so the user gets a
 * consistent link capability allocation across reboots.
 *
 * Return: none (void function)
 *
 */
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dc_link *link;
	uint8_t i;
	uint32_t hpo_dp_recycle_map = 0;

	*map = 0;

	if (dc->caps.dp_hpo) {
		for (i = 0; i < dc->caps.max_links; i++) {
			link = dc->links[i];
			if (link->link_status.link_active &&
					dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
					dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
				/* an hpo dp link encoder is considered recycled when the RX
				 * reports 128b/132b encoding capability but the current link
				 * doesn't use it.
				 */
				hpo_dp_recycle_map |= (1 << i);
		}
		*map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
	}
#endif
}

/**
 * dc_restore_link_res_map() - restore link resource allocation state from a snapshot
 * @dc: pointer to dc of the dm calling this
 * @map: a dc link resource snapshot defined internally to dc.
 *
 * DM needs to call this function after initial link detection on boot and
 * before first commit streams to restore link resource allocation state
 * from previous boot session.
 *
 * Some of the link resources are allocated on a first-come, first-served
 * basis. The allocation mapping depends on the original hotplug order, and
 * this information is lost the next time the driver loads. The snapshot is
 * used to restore link resources to their previous state so the user gets a
 * consistent link capability allocation across reboots.
 *
 * Return: none (void function)
 *
 */
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dc_link *link;
	uint8_t i;
	unsigned int available_hpo_dp_count;
	uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
			>> LINK_RES_HPO_DP_REC_MAP__SHIFT;

	if (dc->caps.dp_hpo) {
		available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
		/* remove excess 128b/132b encoding support for not recycled links */
		for (i = 0; i < dc->caps.max_links; i++) {
			if ((hpo_dp_recycle_map & (1 << i)) == 0) {
				link = dc->links[i];
				if (link->type != dc_connection_none &&
						dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
					if (available_hpo_dp_count > 0)
						available_hpo_dp_count--;
					else
						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
						link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
				}
			}
		}
		/* remove excess 128b/132b encoding support for recycled links */
		for (i = 0; i < dc->caps.max_links; i++) {
			if ((hpo_dp_recycle_map & (1 << i)) != 0) {
				link = dc->links[i];
				if (link->type != dc_connection_none &&
						dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
					if (available_hpo_dp_count > 0)
						available_hpo_dp_count--;
					else
						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
						link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
				}
			}
		}
	}
#endif
}

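dc_get_cur_link_res_map() packs one "HPO encoder recycled" flag per link into a uint32_t that the DM persists across reboots; dc_restore_link_res_map() unpacks it and trims 128b/132b capability where no encoder is left. A minimal round-trip sketch of the bitmap packing (shift and mask values here are illustrative, not dc's actual LINK_RES_HPO_DP_REC_MAP constants):

#include <assert.h>
#include <stdint.h>

#define REC_MAP_SHIFT 0
#define REC_MAP_MASK  0xFFu	/* low byte: one bit per link index */

static uint32_t snapshot_map(const int recycled[], int nlinks)
{
	uint32_t rec = 0;
	int i;

	for (i = 0; i < nlinks; i++)
		if (recycled[i])
			rec |= 1u << i;
	return rec << REC_MAP_SHIFT;
}

int main(void)
{
	const int recycled[4] = { 0, 1, 0, 1 };
	uint32_t map = snapshot_map(recycled, 4);
	uint32_t rec = (map & REC_MAP_MASK) >> REC_MAP_SHIFT;

	assert(rec == 0xA);	/* links 1 and 3 were flagged as recycled */
	return (int)(rec != 0xA);
}
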
@ -1251,6 +1251,7 @@ bool dp_is_max_vs_reached(
|
|||
|
||||
static bool perform_post_lt_adj_req_sequence(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
enum dc_lane_count lane_count =
|
||||
|
@ -1314,6 +1315,7 @@ static bool perform_post_lt_adj_req_sequence(
|
|||
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
|
||||
|
||||
dc_link_dp_set_drive_settings(link,
|
||||
link_res,
|
||||
lt_settings);
|
||||
break;
|
||||
}
|
||||
|
@ -1388,6 +1390,7 @@ enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
|
|||
|
||||
static enum link_training_result perform_channel_equalization_sequence(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings,
|
||||
uint32_t offset)
|
||||
{
|
||||
|
@ -1410,12 +1413,12 @@ static enum link_training_result perform_channel_equalization_sequence(
|
|||
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
|
||||
#endif
|
||||
|
||||
dp_set_hw_training_pattern(link, tr_pattern, offset);
|
||||
dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
|
||||
|
||||
for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
|
||||
retries_ch_eq++) {
|
||||
|
||||
dp_set_hw_lane_settings(link, lt_settings, offset);
|
||||
dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
|
||||
|
||||
/* 2. update DPCD*/
|
||||
if (!retries_ch_eq)
|
||||
|
@ -1479,18 +1482,20 @@ static enum link_training_result perform_channel_equalization_sequence(
|
|||
}
|
||||
|
||||
static void start_clock_recovery_pattern_early(struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings,
|
||||
uint32_t offset)
|
||||
{
|
||||
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
|
||||
__func__);
|
||||
dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
|
||||
dp_set_hw_lane_settings(link, lt_settings, offset);
|
||||
dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
|
||||
dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
|
||||
udelay(400);
|
||||
}
|
||||
|
||||
static enum link_training_result perform_clock_recovery_sequence(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings,
|
||||
uint32_t offset)
|
||||
{
|
||||
|
@ -1506,7 +1511,7 @@ static enum link_training_result perform_clock_recovery_sequence(
|
|||
retry_count = 0;
|
||||
|
||||
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
|
||||
dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
|
||||
dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
|
||||
|
||||
/* najeeb - The synaptics MST hub can put the LT in
|
||||
* infinite loop by switching the VS
|
||||
|
@ -1523,6 +1528,7 @@ static enum link_training_result perform_clock_recovery_sequence(
|
|||
/* 1. call HWSS to set lane settings*/
|
||||
dp_set_hw_lane_settings(
|
||||
link,
|
||||
link_res,
|
||||
lt_settings,
|
||||
offset);
|
||||
|
||||
|
@ -1544,9 +1550,6 @@ static enum link_training_result perform_clock_recovery_sequence(
|
|||
/* 3. wait receiver to lock-on*/
|
||||
wait_time_microsec = lt_settings->cr_pattern_time;
|
||||
|
||||
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
|
||||
wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
|
||||
|
||||
if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
|
||||
(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) {
|
||||
wait_time_microsec = 16000;
|
||||
|
@ -1624,13 +1627,14 @@ static enum link_training_result perform_clock_recovery_sequence(
|
|||
|
||||
static inline enum link_training_result dp_transition_to_video_idle(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings,
|
||||
enum link_training_result status)
|
||||
{
|
||||
union lane_count_set lane_count_set = {0};
|
||||
|
||||
/* 4. mainlink output idle pattern*/
|
||||
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
|
||||
dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
|
||||
|
||||
/*
|
||||
* 5. post training adjust if required
|
||||
|
@ -1654,7 +1658,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
|
|||
}
|
||||
|
||||
if (status == LINK_TRAINING_SUCCESS &&
|
||||
perform_post_lt_adj_req_sequence(link, lt_settings) == false)
|
||||
perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
|
||||
status = LINK_TRAINING_LQA_FAIL;
|
||||
|
||||
lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
|
||||
|
@ -2097,10 +2101,11 @@ static void print_status_message(
|
|||
|
||||
void dc_link_dp_set_drive_settings(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
/* program ASIC PHY settings*/
|
||||
dp_set_hw_lane_settings(link, lt_settings, DPRX);
|
||||
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
|
||||
|
||||
dp_hw_to_dpcd_lane_settings(lt_settings,
|
||||
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
|
||||
|
@ -2111,6 +2116,7 @@ void dc_link_dp_set_drive_settings(
|
|||
|
||||
bool dc_link_dp_perform_link_training_skip_aux(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
const struct dc_link_settings *link_setting)
|
||||
{
|
||||
struct link_training_settings lt_settings = {0};
|
||||
|
@ -2127,10 +2133,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
|
|||
/* 1. Perform_clock_recovery_sequence. */
|
||||
|
||||
/* transmit training pattern for clock recovery */
|
||||
dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
|
||||
dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
|
||||
|
||||
/* call HWSS to set lane settings*/
|
||||
dp_set_hw_lane_settings(link, <_settings, DPRX);
|
||||
dp_set_hw_lane_settings(link, link_res, <_settings, DPRX);
|
||||
|
||||
/* wait receiver to lock-on*/
|
||||
dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
|
||||
|
@ -2138,10 +2144,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
|
|||
/* 2. Perform_channel_equalization_sequence. */
|
||||
|
||||
/* transmit training pattern for channel equalization. */
|
||||
dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
|
||||
dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
|
||||
|
||||
/* call HWSS to set lane settings*/
|
||||
dp_set_hw_lane_settings(link, <_settings, DPRX);
|
||||
dp_set_hw_lane_settings(link, link_res, <_settings, DPRX);
|
||||
|
||||
/* wait receiver to lock-on. */
|
||||
dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
|
||||
|
@ -2149,7 +2155,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
|
|||
/* 3. Perform_link_training_int. */
|
||||
|
||||
/* Mainlink output idle pattern. */
|
||||
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
|
||||
dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
|
||||
|
||||
print_status_message(link, <_settings, LINK_TRAINING_SUCCESS);
|
||||
|
||||
|
@ -2230,6 +2236,7 @@ static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
|
|||
|
||||
static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
uint8_t loop_count;
|
||||
|
@ -2241,7 +2248,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
|
|||
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
|
||||
|
||||
/* Transmit 128b/132b_TPS1 over Main-Link */
|
||||
dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
|
||||
dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
|
||||
/* Set TRAINING_PATTERN_SET to 01h */
|
||||
dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
|
||||
|
||||
|
@ -2251,8 +2258,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
|
|||
&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
|
||||
dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
|
||||
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
|
||||
dp_set_hw_lane_settings(link, lt_settings, DPRX);
|
||||
dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
|
||||
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
|
||||
dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
|
||||
|
||||
/* Set loop counter to start from 1 */
|
||||
loop_count = 1;
|
||||
|
@ -2279,7 +2286,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
|
|||
} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
|
||||
status = DP_128b_132b_LT_FAILED;
|
||||
} else {
|
||||
dp_set_hw_lane_settings(link, lt_settings, DPRX);
|
||||
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
|
||||
dpcd_set_lane_settings(link, lt_settings, DPRX);
|
||||
}
|
||||
loop_count++;
|
||||
|
@ -2308,6 +2315,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
|
|||
|
||||
static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
/* Assumption: assume hardware has transmitted eq pattern */
|
||||
|
@ -2344,6 +2352,7 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
|
|||
|
||||
static enum link_training_result dp_perform_8b_10b_link_training(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
enum link_training_result status = LINK_TRAINING_SUCCESS;
|
||||
|
@ -2353,7 +2362,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
|
|||
uint8_t lane = 0;
|
||||
|
||||
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
|
||||
start_clock_recovery_pattern_early(link, lt_settings, DPRX);
|
||||
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
|
||||
|
||||
/* 1. set link rate, lane count and spread. */
|
||||
dpcd_set_link_settings(link, lt_settings);
|
||||
|
@ -2367,12 +2376,13 @@ static enum link_training_result dp_perform_8b_10b_link_training(
|
|||
|
||||
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
|
||||
repeater_id--) {
|
||||
status = perform_clock_recovery_sequence(link, lt_settings, repeater_id);
|
||||
status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
|
||||
|
||||
if (status != LINK_TRAINING_SUCCESS)
|
||||
break;
|
||||
|
||||
status = perform_channel_equalization_sequence(link,
|
||||
link_res,
|
||||
lt_settings,
|
||||
repeater_id);
|
||||
|
||||
|
@ -2387,9 +2397,10 @@ static enum link_training_result dp_perform_8b_10b_link_training(
|
|||
}
|
||||
|
||||
if (status == LINK_TRAINING_SUCCESS) {
|
||||
status = perform_clock_recovery_sequence(link, lt_settings, DPRX);
|
||||
status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
|
||||
if (status == LINK_TRAINING_SUCCESS) {
|
||||
status = perform_channel_equalization_sequence(link,
|
||||
link_res,
|
||||
lt_settings,
|
||||
DPRX);
|
||||
}
|
||||
|
@ -2401,6 +2412,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
|
|||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
static enum link_training_result dp_perform_128b_132b_link_training(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
enum link_training_result result = LINK_TRAINING_SUCCESS;
|
||||
|
@ -2412,23 +2424,358 @@ static enum link_training_result dp_perform_128b_132b_link_training(
|
|||
decide_8b_10b_training_settings(link,
|
||||
<_settings->link_settings,
|
||||
&legacy_settings);
|
||||
return dp_perform_8b_10b_link_training(link, &legacy_settings);
|
||||
return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
|
||||
}
|
||||
|
||||
dpcd_set_link_settings(link, lt_settings);
|
||||
|
||||
if (result == LINK_TRAINING_SUCCESS)
|
||||
result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
|
||||
result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
|
||||
|
||||
if (result == LINK_TRAINING_SUCCESS)
|
||||
result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
|
||||
result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
|
||||
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
|
||||
static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings)
|
||||
{
|
||||
const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
|
||||
const uint8_t offset = dp_convert_to_count(
|
||||
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
|
||||
const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
|
||||
const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
|
||||
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
|
||||
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
|
||||
uint32_t vendor_lttpr_write_address = 0xF004F;
|
||||
enum link_training_result status = LINK_TRAINING_SUCCESS;
|
||||
uint8_t lane = 0;
|
||||
union down_spread_ctrl downspread = {0};
|
||||
union lane_count_set lane_count_set = {0};
|
||||
uint8_t toggle_rate;
|
||||
uint8_t rate;
|
||||
|
||||
/* Only 8b/10b is supported */
|
||||
ASSERT(dp_get_link_encoding_format(<_settings->link_settings) ==
|
||||
DP_8b_10b_ENCODING);
|
||||
|
||||
if (offset != 0xFF) {
|
||||
vendor_lttpr_write_address +=
|
||||
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
|
||||
}
|
||||
|
||||
/* Vendor specific: Reset lane settings */
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
vendor_lttpr_write_address,
|
||||
&vendor_lttpr_write_data_reset[0],
|
||||
sizeof(vendor_lttpr_write_data_reset));
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
vendor_lttpr_write_address,
|
||||
&vendor_lttpr_write_data_vs[0],
|
||||
sizeof(vendor_lttpr_write_data_vs));
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
vendor_lttpr_write_address,
|
||||
&vendor_lttpr_write_data_pe[0],
|
||||
sizeof(vendor_lttpr_write_data_pe));
|
||||
|
||||
/* Vendor specific: Enable intercept */
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
vendor_lttpr_write_address,
|
||||
&vendor_lttpr_write_data_intercept_en[0],
|
||||
sizeof(vendor_lttpr_write_data_intercept_en));
|
||||
|
||||
/* 1. set link rate, lane count and spread. */
|
||||
|
||||
downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
|
||||
|
||||
lane_count_set.bits.LANE_COUNT_SET =
|
||||
lt_settings->link_settings.lane_count;
|
||||
|
||||
lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
|
||||
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
|
||||
|
||||
|
||||
if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
|
||||
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
|
||||
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
|
||||
}
|
||||
|
||||
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
|
||||
&downspread.raw, sizeof(downspread));
|
||||
|
||||
core_link_write_dpcd(link, DP_LANE_COUNT_SET,
|
||||
&lane_count_set.raw, 1);
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
rate = get_dpcd_link_rate(<_settings->link_settings);
|
||||
#else
|
||||
rate = (uint8_t) (lt_settings->link_settings.link_rate);
|
||||
#endif
|
||||
|
||||
/* Vendor specific: Toggle link rate */
|
||||
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
|
||||
|
||||
if (link->vendor_specific_lttpr_link_rate_wa == rate) {
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
DP_LINK_BW_SET,
|
||||
&toggle_rate,
|
||||
1);
|
||||
}
|
||||
|
||||
link->vendor_specific_lttpr_link_rate_wa = rate;
|
||||
|
||||
core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
|
||||
|
||||
DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
|
||||
__func__,
|
||||
DP_LINK_BW_SET,
|
||||
lt_settings->link_settings.link_rate,
|
||||
DP_LANE_COUNT_SET,
|
||||
lt_settings->link_settings.lane_count,
|
||||
lt_settings->enhanced_framing,
|
||||
DP_DOWNSPREAD_CTRL,
|
||||
lt_settings->link_settings.link_spread);
|
||||
|
||||
/* 2. Perform link training */
|
||||
|
||||
/* Perform Clock Recovery Sequence */
|
||||
if (status == LINK_TRAINING_SUCCESS) {
|
||||
uint32_t retries_cr;
|
||||
uint32_t retry_count;
|
||||
uint32_t wait_time_microsec;
|
||||
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
|
||||
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
|
||||
union lane_align_status_updated dpcd_lane_status_updated;
|
||||
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
|
||||
|
||||
retries_cr = 0;
|
||||
retry_count = 0;
|
||||
|
||||
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
|
||||
(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
|
||||
|
||||
memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
|
||||
memset(&dpcd_lane_status_updated, '\0',
|
||||
sizeof(dpcd_lane_status_updated));
|
||||
|
||||
/* 1. call HWSS to set lane settings */
|
||||
dp_set_hw_lane_settings(
|
||||
link,
|
||||
link_res,
|
||||
lt_settings,
|
||||
0);
|
||||
|
||||
/* 2. update DPCD of the receiver */
|
||||
if (!retry_count) {
|
||||
/* EPR #361076 - write as a 5-byte burst,
|
||||
* but only for the 1-st iteration.
|
||||
*/
|
||||
dpcd_set_lt_pattern_and_lane_settings(
|
||||
link,
|
||||
lt_settings,
|
||||
lt_settings->pattern_for_cr,
|
||||
0);
|
||||
/* Vendor specific: Disable intercept */
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
vendor_lttpr_write_address,
|
||||
&vendor_lttpr_write_data_intercept_dis[0],
|
||||
sizeof(vendor_lttpr_write_data_intercept_dis));
|
||||
} else {
|
||||
vendor_lttpr_write_data_vs[3] = 0;
|
||||
vendor_lttpr_write_data_pe[3] = 0;
				for (lane = 0; lane < lane_count; lane++) {
					vendor_lttpr_write_data_vs[3] |=
							lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
					vendor_lttpr_write_data_pe[3] |=
							lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
				}

				/* Vendor specific: Update VS and PE to DPRX requested value */
				core_link_write_dpcd(
						link,
						vendor_lttpr_write_address,
						&vendor_lttpr_write_data_vs[0],
						sizeof(vendor_lttpr_write_data_vs));
				core_link_write_dpcd(
						link,
						vendor_lttpr_write_address,
						&vendor_lttpr_write_data_pe[0],
						sizeof(vendor_lttpr_write_data_pe));

				dpcd_set_lane_settings(
						link,
						lt_settings,
						0);
			}

			/* 3. wait receiver to lock-on*/
			wait_time_microsec = lt_settings->cr_pattern_time;

			dp_wait_for_training_aux_rd_interval(
					link,
					wait_time_microsec);

			/* 4. Read lane status and requested drive
			 * settings as set by the sink
			 */
			dp_get_lane_status_and_lane_adjust(
					link,
					lt_settings,
					dpcd_lane_status,
					&dpcd_lane_status_updated,
					dpcd_lane_adjust,
					0);

			/* 5. check CR done*/
			if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
				status = LINK_TRAINING_SUCCESS;
				break;
			}

			/* 6. max VS reached*/
			if (dp_is_max_vs_reached(lt_settings))
				break;

			/* 7. same lane settings */
			/* Note: settings are the same for all lanes,
			 * so comparing first lane is sufficient
			 */
			if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
					dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
				retries_cr++;
			else
				retries_cr = 0;

			/* 8. update VS/PE/PC2 in lt_settings*/
			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
			retry_count++;
		}

		if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
			ASSERT(0);
			DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
				__func__,
				LINK_TRAINING_MAX_CR_RETRY);
		}
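		/* dp_get_cr_failure() names the first lane without CR_DONE and
		 * returns LINK_TRAINING_SUCCESS when every lane locked, so the
		 * assignment below preserves the success path.
		 */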
		status = dp_get_cr_failure(lane_count, dpcd_lane_status);
	}

	/* Perform Channel EQ Sequence */
	if (status == LINK_TRAINING_SUCCESS) {
		enum dc_dp_training_pattern tr_pattern;
		uint32_t retries_ch_eq;
		uint32_t wait_time_microsec;
		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
		union lane_align_status_updated dpcd_lane_status_updated = {0};
		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};

		/* Note: also check that TPS4 is a supported feature*/
		tr_pattern = lt_settings->pattern_for_eq;

		dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);

		status = LINK_TRAINING_EQ_FAIL_EQ;
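		/* status stays EQ_FAIL_EQ unless the loop below succeeds or
		 * detects loss of clock recovery.
		 */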
		for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
			retries_ch_eq++) {

			dp_set_hw_lane_settings(link, link_res, lt_settings, 0);

			vendor_lttpr_write_data_vs[3] = 0;
			vendor_lttpr_write_data_pe[3] = 0;

			for (lane = 0; lane < lane_count; lane++) {
				vendor_lttpr_write_data_vs[3] |=
						lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
				vendor_lttpr_write_data_pe[3] |=
						lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
			}

			/* Vendor specific: Update VS and PE to DPRX requested value */
			core_link_write_dpcd(
					link,
					vendor_lttpr_write_address,
					&vendor_lttpr_write_data_vs[0],
					sizeof(vendor_lttpr_write_data_vs));
			core_link_write_dpcd(
					link,
					vendor_lttpr_write_address,
					&vendor_lttpr_write_data_pe[0],
					sizeof(vendor_lttpr_write_data_pe));

			/* 2. update DPCD*/
			if (!retries_ch_eq)
				/* EPR #361076 - write as a 5-byte burst,
				 * but only for the 1-st iteration
				 */

				dpcd_set_lt_pattern_and_lane_settings(
						link,
						lt_settings,
						tr_pattern, 0);
			else
				dpcd_set_lane_settings(link, lt_settings, 0);

			/* 3. wait for receiver to lock-on*/
			wait_time_microsec = lt_settings->eq_pattern_time;

			dp_wait_for_training_aux_rd_interval(
					link,
					wait_time_microsec);

			/* 4. Read lane status and requested
			 * drive settings as set by the sink
			 */
			dp_get_lane_status_and_lane_adjust(
					link,
					lt_settings,
					dpcd_lane_status,
					&dpcd_lane_status_updated,
					dpcd_lane_adjust,
					0);

			/* 5. check CR done*/
			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
				status = LINK_TRAINING_EQ_FAIL_CR;
				break;
			}

			/* 6. check CHEQ done*/
			if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
					dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
					dp_is_interlane_aligned(dpcd_lane_status_updated)) {
				status = LINK_TRAINING_SUCCESS;
				break;
			}

			/* 7. update VS/PE/PC2 in lt_settings*/
			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
		}
	}

	return status;
}


enum link_training_result dc_link_dp_perform_link_training(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct dc_link_settings *link_settings,
	bool skip_video_pattern)
{
@@ -2459,18 +2806,22 @@ enum link_training_result dc_link_dp_perform_link_training(

	/* configure link prior to entering training mode */
	dpcd_configure_lttpr_mode(link, &lt_settings);
	dp_set_fec_ready(link, lt_settings.should_set_fec_ready);
	dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
	dpcd_configure_channel_coding(link, &lt_settings);

	/* enter training mode:
	 * Per DP specs starting from here, DPTX device shall not issue
	 * Non-LT AUX transactions inside training mode.
	 */
	if (encoding == DP_8b_10b_ENCODING)
		status = dp_perform_8b_10b_link_training(link, &lt_settings);
	if (!link->dc->debug.apply_vendor_specific_lttpr_wa &&
			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
			link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
		status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
	else if (encoding == DP_8b_10b_ENCODING)
		status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	else if (encoding == DP_128b_132b_ENCODING)
		status = dp_perform_128b_132b_link_training(link, &lt_settings);
		status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
#endif
	else
		ASSERT(0);

@@ -2488,6 +2839,7 @@ enum link_training_result dc_link_dp_perform_link_training(
	/* switch to video idle */
	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
		status = dp_transition_to_video_idle(link,
				link_res,
				&lt_settings,
				status);

@@ -2539,6 +2891,7 @@ bool perform_link_training_with_retries(

		dp_enable_link_phy(
			link,
			&pipe_ctx->link_res,
			signal,
			pipe_ctx->clock_source->id,
			&current_setting);

@@ -2566,21 +2919,22 @@ bool perform_link_training_with_retries(
		dp_set_panel_mode(link, panel_mode);

		if (link->aux_access_disabled) {
			dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
			dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
			return true;
		} else {
			/** @todo Consolidate USB4 DP and DPx.x training. */
			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
				status = dc_link_dpia_perform_link_training(link,
						&pipe_ctx->link_res,
						&current_setting,
						skip_video_pattern);

				/* Transmit idle pattern once training successful. */
				if (status == LINK_TRAINING_SUCCESS)
					dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
							NULL, 0);
					dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
			} else {
				status = dc_link_dp_perform_link_training(link,
						&pipe_ctx->link_res,
						&current_setting,
						skip_video_pattern);
			}

@@ -2597,7 +2951,7 @@ bool perform_link_training_with_retries(
			DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
				__func__, (unsigned int)j + 1, attempts);

		dp_disable_link_phy(link, signal);
		dp_disable_link_phy(link, &pipe_ctx->link_res, signal);

		/* Abort link training if failure due to sink being unplugged. */
		if (status == LINK_TRAINING_ABORT) {

@@ -2646,12 +3000,13 @@ static enum clock_source_id get_clock_source_id(struct dc_link *link)
	return dp_cs_id;
}

static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res,
		bool mst_enable)
{
	if (mst_enable == false &&
		link->type == dc_connection_mst_branch) {
		/* Disable MST on link. Use only local sink. */
		dp_disable_link_phy_mst(link, link->connector_signal);
		dp_disable_link_phy_mst(link, link_res, link->connector_signal);

		link->type = dc_connection_single;
		link->local_sink = link->remote_sinks[0];

@@ -2662,7 +3017,7 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
		link->type == dc_connection_single &&
		link->remote_sinks[0] != NULL) {
		/* Re-enable MST on link. */
		dp_disable_link_phy(link, link->connector_signal);
		dp_disable_link_phy(link, link_res, link->connector_signal);
		dp_enable_mst_on_sink(link, true);

		link->type = dc_connection_mst_branch;

@@ -2688,6 +3043,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link)

enum link_training_result dc_link_dp_sync_lt_attempt(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct dc_link_settings *link_settings,
	struct dc_link_training_overrides *lt_overrides)
{

@@ -2707,14 +3063,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
			&lt_settings);
	/* Setup MST Mode */
	if (lt_overrides->mst_enable)
		set_dp_mst_mode(link, *lt_overrides->mst_enable);
		set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable);

	/* Disable link */
	dp_disable_link_phy(link, link->connector_signal);
	dp_disable_link_phy(link, link_res, link->connector_signal);

	/* Enable link */
	dp_cs_id = get_clock_source_id(link);
	dp_enable_link_phy(link, link->connector_signal,
	dp_enable_link_phy(link, link_res, link->connector_signal,
		dp_cs_id, link_settings);

	/* Set FEC enable */

@@ -2722,7 +3078,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
#endif
		fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
		dp_set_fec_ready(link, fec_enable);
		dp_set_fec_ready(link, NULL, fec_enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	}
#endif

@@ -2739,7 +3095,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(

	/* Attempt to train with given link training settings */
	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
		start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX);

	/* Set link rate, lane count and spread. */
	dpcd_set_link_settings(link, &lt_settings);

@@ -2747,9 +3103,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
	/* 2. perform link training (set link training done
	 * to false is done as well)
	 */
	lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
	lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX);
	if (lt_status == LINK_TRAINING_SUCCESS) {
		lt_status = perform_channel_equalization_sequence(link,
				link_res,
				&lt_settings,
				DPRX);
	}

@@ -2770,11 +3127,11 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
#if defined(CONFIG_DRM_AMD_DC_DCN)
		struct dc_link_settings link_settings = link->cur_link_settings;
#endif
		dp_disable_link_phy(link, link->connector_signal);
		dp_disable_link_phy(link, NULL, link->connector_signal);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
#endif
			dp_set_fec_ready(link, false);
			dp_set_fec_ready(link, NULL, false);
	}

	link->sync_lt_in_progress = false;

@@ -2829,7 +3186,8 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
	return false;
}

static struct dc_link_settings get_max_link_cap(struct dc_link *link)
static struct dc_link_settings get_max_link_cap(struct dc_link *link,
		const struct link_resource *link_res)
{
	struct dc_link_settings max_link_cap = {0};
#if defined(CONFIG_DRM_AMD_DC_DCN)

@@ -2853,9 +3211,11 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
	if (link_enc)
		link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
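	/* UHBR rates require a 128b/132b-capable HPO link encoder; with none
	 * in the link resource, or with UHBR disabled for debug, the cap
	 * falls back to HBR3.
	 */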
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (max_link_cap.link_rate >= LINK_RATE_UHBR10 &&
			!link->hpo_dp_link_enc)
	if (max_link_cap.link_rate >= LINK_RATE_UHBR10) {
		if (!link_res->hpo_dp_link_enc ||
				link->dc->debug.disable_uhbr)
			max_link_cap.link_rate = LINK_RATE_HIGH3;
	}
#endif

	/* Lower link settings based on sink's link cap */

@@ -2873,7 +3233,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
	 * account for lttpr repeaters cap
	 * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
	 */
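	/* The relaxed check below applies the repeater caps whenever any
	 * LTTPR is present, not only in non-transparent mode.
	 */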
	if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
	if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;

@@ -3012,6 +3372,7 @@ bool hpd_rx_irq_check_link_loss_status(

bool dp_verify_link_cap(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct dc_link_settings *known_limit_link_setting,
	int *fail_count)
{

@@ -3029,7 +3390,7 @@ bool dp_verify_link_cap(
	/* link training starts with the maximum common settings
	 * supported by both sink and ASIC.
	 */
	max_link_cap = get_max_link_cap(link);
	max_link_cap = get_max_link_cap(link, link_res);
	initial_link_settings = get_common_supported_link_settings(
			*known_limit_link_setting,
			max_link_cap);

@@ -3069,7 +3430,7 @@ bool dp_verify_link_cap(
	 * find the physical link capability
	 */
	/* disable PHY done possible by BIOS, will be done by driver itself */
	dp_disable_link_phy(link, link->connector_signal);
	dp_disable_link_phy(link, link_res, link->connector_signal);

	dp_cs_id = get_clock_source_id(link);

@@ -3081,8 +3442,8 @@ bool dp_verify_link_cap(
	 */
	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
			link->dc->debug.usbc_combo_phy_reset_wa) {
		dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
		dp_disable_link_phy(link, link->connector_signal);
		dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur);
		dp_disable_link_phy(link, link_res, link->connector_signal);
	}

	do {

@@ -3093,6 +3454,7 @@ bool dp_verify_link_cap(

		dp_enable_link_phy(
				link,
				link_res,
				link->connector_signal,
				dp_cs_id,
				cur);

@@ -3103,6 +3465,7 @@ bool dp_verify_link_cap(
		else {
			status = dc_link_dp_perform_link_training(
					link,
					link_res,
					cur,
					skip_video_pattern);
			if (status == LINK_TRAINING_SUCCESS)

@@ -3124,7 +3487,7 @@ bool dp_verify_link_cap(
		 * setting or before returning we'll enable it later
		 * based on the actual mode we're driving
		 */
		dp_disable_link_phy(link, link->connector_signal);
		dp_disable_link_phy(link, link_res, link->connector_signal);
	} while (!success && decide_fallback_link_setting(link,
			initial_link_settings, cur, status));

@@ -3148,6 +3511,7 @@ bool dp_verify_link_cap(

bool dp_verify_link_cap_with_retries(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct dc_link_settings *known_limit_link_setting,
	int attempts)
{

@@ -3165,7 +3529,7 @@ bool dp_verify_link_cap_with_retries(
			link->verified_link_cap.link_rate = LINK_RATE_LOW;
			link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
			break;
		} else if (dp_verify_link_cap(link,
		} else if (dp_verify_link_cap(link, link_res,
				known_limit_link_setting,
				&fail_count) && fail_count == 0) {
			success = true;

@@ -3177,13 +3541,13 @@ bool dp_verify_link_cap_with_retries(
}

bool dp_verify_mst_link_cap(
	struct dc_link *link)
	struct dc_link *link, const struct link_resource *link_res)
{
	struct dc_link_settings max_link_cap = {0};

	if (dp_get_link_encoding_format(&link->reported_link_cap) ==
			DP_8b_10b_ENCODING) {
		max_link_cap = get_max_link_cap(link);
		max_link_cap = get_max_link_cap(link, link_res);
		link->verified_link_cap = get_common_supported_link_settings(
				link->reported_link_cap,
				max_link_cap);

@@ -3192,6 +3556,7 @@ bool dp_verify_mst_link_cap(
	else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
			DP_128b_132b_ENCODING) {
		dp_verify_link_cap_with_retries(link,
				link_res,
				&link->reported_link_cap,
				LINK_TRAINING_MAX_VERIFY_RETRY);
	}

@@ -5720,7 +6085,7 @@ bool dc_link_dp_set_test_pattern(
			DP_TEST_PATTERN_VIDEO_MODE) {
		/* Set CRTC Test Pattern */
		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
		dp_set_hw_test_pattern(link, test_pattern,
		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
				(uint8_t *)p_custom_pattern,
				(uint32_t)cust_pattern_size);

@@ -5751,7 +6116,7 @@ bool dc_link_dp_set_test_pattern(
				p_link_settings->dpcd_lane_settings,
				p_link_settings->link_settings.lane_count);
		} else {
			dp_set_hw_lane_settings(link, p_link_settings, DPRX);
			dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
			dpcd_set_lane_settings(link, p_link_settings, DPRX);
		}
	}

@@ -5766,7 +6131,7 @@ bool dc_link_dp_set_test_pattern(
			pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
		}

		dp_set_hw_test_pattern(link, test_pattern,
		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
				(uint8_t *)p_custom_pattern,
				(uint32_t)cust_pattern_size);

@@ -6086,7 +6451,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
	return DP_PANEL_MODE_DEFAULT;
}

enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
{
	/* FEC has to be "set ready" before the link training.
	 * The policy is to always train with FEC

@@ -6653,8 +7018,10 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(

bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
	/* If this assert is hit then we have a link encoder dynamic management issue */
	ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
	return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
			pipe_ctx->stream->link->hpo_dp_link_enc &&
			pipe_ctx->link_res.hpo_dp_link_enc &&
			dc_is_dp_signal(pipe_ctx->stream->signal));
}
#endif

@@ -77,7 +77,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
 * @param[in] link_setting Lane count, link rate and downspread control.
 * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis).
 */
static enum link_training_result dpia_configure_link(struct dc_link *link,
static enum link_training_result dpia_configure_link(
		struct dc_link *link,
		const struct link_resource *link_res,
		const struct dc_link_settings *link_setting,
		struct link_training_settings *lt_settings)
{

@@ -111,7 +113,7 @@ static enum link_training_result dpia_configure_link(struct dc_link *link,
		fec_enable = *link->preferred_training_settings.fec_enable;
	else
		fec_enable = true;
	status = dp_set_fec_ready(link, fec_enable);
	status = dp_set_fec_ready(link, link_res, fec_enable);
	if (status != DC_OK && link->is_hpd_pending)
		return LINK_TRAINING_ABORT;

@@ -252,7 +254,9 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 * @param hop The Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link,
static enum link_training_result dpia_training_cr_non_transparent(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings,
		uint32_t hop)
{

@@ -411,7 +415,9 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
 * @param link DPIA link being trained.
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 */
static enum link_training_result dpia_training_cr_transparent(struct dc_link *link,
static enum link_training_result dpia_training_cr_transparent(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings)
{
	enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;

@@ -511,16 +517,18 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 * @param hop The Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_cr_phase(struct dc_link *link,
static enum link_training_result dpia_training_cr_phase(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings,
		uint32_t hop)
{
	enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;

	if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
		result = dpia_training_cr_non_transparent(link, lt_settings, hop);
		result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
	else
		result = dpia_training_cr_transparent(link, lt_settings);
		result = dpia_training_cr_transparent(link, link_res, lt_settings);

	return result;
}

@@ -561,7 +569,9 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 * @param hop The Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link,
static enum link_training_result dpia_training_eq_non_transparent(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings,
		uint32_t hop)
{

@@ -700,7 +710,9 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 * @param hop The Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_eq_transparent(struct dc_link *link,
static enum link_training_result dpia_training_eq_transparent(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings)
{
	enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;

@@ -779,16 +791,18 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 * @param hop The Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_eq_phase(struct dc_link *link,
static enum link_training_result dpia_training_eq_phase(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings,
		uint32_t hop)
{
	enum link_training_result result;

	if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
		result = dpia_training_eq_non_transparent(link, lt_settings, hop);
		result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
	else
		result = dpia_training_eq_transparent(link, lt_settings);
		result = dpia_training_eq_transparent(link, link_res, lt_settings);

	return result;
}

@@ -908,7 +922,9 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
	core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}

enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link,
enum link_training_result dc_link_dpia_perform_link_training(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct dc_link_settings *link_setting,
	bool skip_video_pattern)
{

@@ -918,7 +934,7 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin
	int8_t repeater_id; /* Current hop. */

	/* Configure link as prescribed in link_setting and set LTTPR mode. */
	result = dpia_configure_link(link, link_setting, &lt_settings);
	result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
	if (result != LINK_TRAINING_SUCCESS)
		return result;

@@ -930,12 +946,12 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin
	 */
	for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
		/* Clock recovery. */
		result = dpia_training_cr_phase(link, &lt_settings, repeater_id);
		result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id);
		if (result != LINK_TRAINING_SUCCESS)
			break;

		/* Equalization. */
		result = dpia_training_eq_phase(link, &lt_settings, repeater_id);
		result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id);
		if (result != LINK_TRAINING_SUCCESS)
			break;

@@ -71,6 +71,7 @@ void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)

void dp_enable_link_phy(
	struct dc_link *link,
	const struct link_resource *link_res,
	enum signal_type signal,
	enum clock_source_id clock_source,
	const struct dc_link_settings *link_settings)

@@ -135,7 +136,7 @@ void dp_enable_link_phy(

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
		enable_dp_hpo_output(link, link_settings);
		enable_dp_hpo_output(link, link_res, link_settings);
	} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
		if (dc_is_dp_sst_signal(signal)) {
			link_enc->funcs->enable_dp_output(

@@ -236,12 +237,13 @@ bool edp_receiver_ready_T7(struct dc_link *link)
	return result;
}

void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc;
	struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc;
#endif
	struct link_encoder *link_enc;

@@ -260,7 +262,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
			link->dc->hwss.edp_backlight_control(link, false);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
			disable_dp_hpo_output(link, signal);
			disable_dp_hpo_output(link, link_res, signal);
		else
			link_enc->funcs->disable_output(link_enc, signal);
#else

@@ -274,7 +276,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
				hpo_link_enc)
			disable_dp_hpo_output(link, signal);
			disable_dp_hpo_output(link, link_res, signal);
		else
			link_enc->funcs->disable_output(link_enc, signal);
#else

@@ -294,13 +296,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
		dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}

void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
		enum signal_type signal)
{
	/* MST disable link only when no stream use the link */
	if (link->mst_stream_alloc_table.stream_count > 0)
		return;

	dp_disable_link_phy(link, signal);
	dp_disable_link_phy(link, link_res, signal);

	/* set the sink to SST mode after disabling the link */
	dp_enable_mst_on_sink(link, false);

@@ -308,6 +311,7 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)

bool dp_set_hw_training_pattern(
	struct dc_link *link,
	const struct link_resource *link_res,
	enum dc_dp_training_pattern pattern,
	uint32_t offset)
{

@@ -338,7 +342,7 @@ bool dp_set_hw_training_pattern(
		break;
	}

	dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
	dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);

	return true;
}

@@ -349,6 +353,7 @@ bool dp_set_hw_training_pattern(
#endif
void dp_set_hw_lane_settings(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct link_training_settings *link_settings,
	uint32_t offset)
{

@@ -361,8 +366,8 @@ void dp_set_hw_lane_settings(
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dp_get_link_encoding_format(&link_settings->link_settings) ==
			DP_128b_132b_ENCODING) {
		link->hpo_dp_link_enc->funcs->set_ffe(
			link->hpo_dp_link_enc,
		link_res->hpo_dp_link_enc->funcs->set_ffe(
			link_res->hpo_dp_link_enc,
			&link_settings->link_settings,
			link_settings->lane_settings[0].FFE_PRESET.raw);
	} else if (dp_get_link_encoding_format(&link_settings->link_settings)

@@ -379,6 +384,7 @@ void dp_set_hw_lane_settings(

void dp_set_hw_test_pattern(
	struct dc_link *link,
	const struct link_resource *link_res,
	enum dp_test_pattern test_pattern,
	uint8_t *custom_pattern,
	uint32_t custom_pattern_size)

@@ -406,8 +412,8 @@ void dp_set_hw_test_pattern(
#if defined(CONFIG_DRM_AMD_DC_DCN)
	switch (link_encoding_format) {
	case DP_128b_132b_ENCODING:
		link->hpo_dp_link_enc->funcs->set_link_test_pattern(
				link->hpo_dp_link_enc, &pattern_param);
		link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
				link_res->hpo_dp_link_enc, &pattern_param);
		break;
	case DP_8b_10b_ENCODING:
		ASSERT(encoder);

@@ -446,7 +452,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
				pipes[i].stream_res.stream_enc);

			/* disable any test pattern that might be active */
			dp_set_hw_test_pattern(link,
			dp_set_hw_test_pattern(link, &pipes[i].link_res,
					DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);

			dp_receiver_power_ctrl(link, false);

@@ -763,7 +769,9 @@ static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
	}
}

void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
void enable_dp_hpo_output(struct dc_link *link,
		const struct link_resource *link_res,
		const struct dc_link_settings *link_settings)
{
	const struct dc *dc = link->dc;
	enum phyd32clk_clock_source phyd32clk;

@@ -789,10 +797,11 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l
		}
	} else {
		/* DP2.0 HW: call transmitter control to enable PHY */
		link->hpo_dp_link_enc->funcs->enable_link_phy(
			link->hpo_dp_link_enc,
		link_res->hpo_dp_link_enc->funcs->enable_link_phy(
			link_res->hpo_dp_link_enc,
			link_settings,
			link->link_enc->transmitter);
			link->link_enc->transmitter,
			link->link_enc->hpd_source);
	}

	/* DCCG muxing and DTBCLK DTO */

@@ -806,24 +815,26 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l
		phyd32clk = get_phyd32clk_src(link);
		dc->res_pool->dccg->funcs->enable_symclk32_le(
				dc->res_pool->dccg,
				link->hpo_dp_link_enc->inst,
				link_res->hpo_dp_link_enc->inst,
				phyd32clk);
		link->hpo_dp_link_enc->funcs->link_enable(
				link->hpo_dp_link_enc,
		link_res->hpo_dp_link_enc->funcs->link_enable(
				link_res->hpo_dp_link_enc,
				link_settings->lane_count);
	}
}

void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
void disable_dp_hpo_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	const struct dc *dc = link->dc;

	link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc);
	link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		dc->res_pool->dccg->funcs->disable_symclk32_le(
				dc->res_pool->dccg,
				link->hpo_dp_link_enc->inst);
				link_res->hpo_dp_link_enc->inst);

		dc->res_pool->dccg->funcs->set_physymclk(
				dc->res_pool->dccg,

@@ -834,8 +845,8 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
		dm_set_phyd32clk(dc->ctx, 0);
	} else {
		/* DP2.0 HW: call transmitter control to disable PHY */
		link->hpo_dp_link_enc->funcs->disable_link_phy(
			link->hpo_dp_link_enc,
		link_res->hpo_dp_link_enc->funcs->disable_link_phy(
			link_res->hpo_dp_link_enc,
			signal);
	}
}

@@ -734,10 +734,6 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
			(*split_idx)++;
			split_pipe = split_pipe->top_pipe;
		}

		/* MPO window on right side of ODM split */
		if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe)
			(*split_idx)++;
	} else {
		/*Get odm split index*/
		struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;

@@ -784,11 +780,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
	/*
	 * Only the leftmost ODM pipe should be offset by a nonzero distance
	 */
	if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
		/* MPO window on right side of ODM split */
		data->recout.x = stream->dst.x + (surf_clip.x - stream->dst.width/2) *
				stream->dst.width / stream->src.width;
	} else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
	if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
		data->recout.x = stream->dst.x;
		if (stream->src.x < surf_clip.x)
			data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width

@@ -986,8 +978,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
			* stream->dst.height / stream->src.height;
	if (pipe_ctx->prev_odm_pipe && split_idx)
		ro_lb = data->h_active * split_idx - recout_full_x;
	else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe)
		ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x;
	else
		ro_lb = data->recout.x - recout_full_x;
	ro_tb = data->recout.y - recout_full_y;

@@ -1086,9 +1076,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
			timing->v_border_top + timing->v_border_bottom;
	if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
		pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
	/* ODM + windows MPO, where window is on either right or left ODM half */
	else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe))
		pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;

	/* depends on h_active */
	calculate_recout(pipe_ctx);

@@ -1097,6 +1084,11 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
	/* depends on scaling ratios and recout, does not calculate offset yet */
	calculate_viewport_size(pipe_ctx);

	/* Stopgap for validation of ODM + MPO on one side of screen case */
	if (pipe_ctx->plane_res.scl_data.viewport.height < 1 ||
			pipe_ctx->plane_res.scl_data.viewport.width < 1)
		return false;
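	/* An ODM + MPO window confined to one side of the screen can produce
	 * a degenerate (sub-1px) viewport; rejecting the context here is the
	 * stopgap until that split is handled properly.
	 */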
	/*
	 * LB calculations depend on vp size, h/v_active and scaling ratios
	 * Setting line buffer pixel depth to 24bpp yields banding

@@ -1445,31 +1437,14 @@ bool dc_add_plane_to_context(
		if (head_pipe != free_pipe) {
			tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
			ASSERT(tail_pipe);

			/* ODM + window MPO, where MPO window is on right half only */
			if (free_pipe->plane_state &&
				(free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.width/2) &&
				tail_pipe->next_odm_pipe) {
				free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg;
				free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm;
				free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp;
				free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc;
				free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio;
				free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source;

				free_pipe->top_pipe = tail_pipe->next_odm_pipe;
				tail_pipe->next_odm_pipe->bottom_pipe = free_pipe;
			} else {
			free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
			free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
			free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
			free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
			free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
			free_pipe->clock_source = tail_pipe->clock_source;

			free_pipe->top_pipe = tail_pipe;
			tail_pipe->bottom_pipe = free_pipe;

			if (!free_pipe->next_odm_pipe && tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
				free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
				tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;

@@ -1479,20 +1454,6 @@ bool dc_add_plane_to_context(
				tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
			}
		}

		/* ODM + window MPO, where MPO window is on left half only */
		if (free_pipe->plane_state &&
			(free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
			free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
			break;
		}
		/* ODM + window MPO, where MPO window is on right half only */
		if (free_pipe->plane_state &&
			(free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.width/2)) {
			break;
		}

		head_pipe = head_pipe->next_odm_pipe;
	}
	/* assign new surfaces*/

@@ -1763,6 +1724,94 @@ static void update_hpo_dp_stream_engine_usage(
		res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
	}
}
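
/* The helpers below track HPO DP link encoders per resource context:
 * hpo_dp_link_enc_to_link_idx[] records which link owns each encoder and
 * hpo_dp_link_enc_ref_cnts[] counts the streams using it, so one encoder
 * can be shared by every stream on a link and freed on the last release.
 */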
static inline int find_acquired_hpo_dp_link_enc_for_link(
		const struct resource_context *res_ctx,
		const struct dc_link *link)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++)
		if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 &&
				res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index)
			return i;

	return -1;
}

static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx,
		const struct resource_pool *pool)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++)
		if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0)
			break;

	return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) &&
			i < pool->hpo_dp_link_enc_count) ? i : -1;
}

static inline void acquire_hpo_dp_link_enc(
		struct resource_context *res_ctx,
		unsigned int link_index,
		int enc_index)
{
	res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index;
	res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1;
}

static inline void retain_hpo_dp_link_enc(
		struct resource_context *res_ctx,
		int enc_index)
{
	res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++;
}

static inline void release_hpo_dp_link_enc(
		struct resource_context *res_ctx,
		int enc_index)
{
	ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0);
	res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--;
}

static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	int enc_index;

	enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);

	if (enc_index >= 0) {
		retain_hpo_dp_link_enc(res_ctx, enc_index);
	} else {
		enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
		if (enc_index >= 0)
			acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index);
	}

	if (enc_index >= 0)
		pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];

	return pipe_ctx->link_res.hpo_dp_link_enc != NULL;
}

static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	int enc_index;

	enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);

	if (enc_index >= 0) {
		release_hpo_dp_link_enc(res_ctx, enc_index);
		pipe_ctx->link_res.hpo_dp_link_enc = NULL;
	}
}
#endif

/* TODO: release audio object */
@@ -1925,6 +1974,7 @@ enum dc_status dc_remove_stream_from_ctx(
				&new_ctx->res_ctx, dc->res_pool,
				del_pipe->stream_res.hpo_dp_stream_enc,
				false);
		remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream);
	}
#endif

@@ -2200,6 +2250,8 @@ enum dc_status resource_map_pool_resources(
					&context->res_ctx, pool,
					pipe_ctx->stream_res.hpo_dp_stream_enc,
					true);
			if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream))
				return DC_NO_LINK_ENC_RESOURCE;
		}
	}
#endif

@@ -2875,6 +2927,8 @@ bool pipe_need_reprogram(
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
		return true;
	if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc)
		return true;
#endif

	/* DIG link encoder resource assignment for stream changed. */

@@ -3143,22 +3197,23 @@ void get_audio_check(struct audio_info *aud_modes,
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
		const struct resource_pool *pool)
struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
		const struct resource_context *res_ctx,
		const struct resource_pool *pool,
		const struct dc_link *link)
{
	uint8_t i;
	struct hpo_dp_link_encoder *enc = NULL;
	struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL;
	int enc_index;

	ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS);
	enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link);

	for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
		if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) {
			enc = pool->hpo_dp_link_enc[i];
			break;
		}
	}
	if (enc_index < 0)
		enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);

	return enc;
	if (enc_index >= 0)
		hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];

	return hpo_dp_link_enc;
}
#endif

@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;

#define DC_VER "3.2.166"
#define DC_VER "3.2.167"

#define MAX_SURFACES 3
#define MAX_PLANES 6

@@ -691,6 +691,7 @@ struct dc_debug_options {
	/* TODO - remove once tested */
	bool legacy_dp2_lt;
	bool set_mst_en_for_sst;
	bool disable_uhbr;
	bool force_dp2_lt_fallback_method;
#endif
	union mem_low_power_enable_options enable_mem_low_power;

@@ -30,6 +30,8 @@
#include "dc_types.h"
#include "grph_object_defs.h"

struct link_resource;

enum dc_link_fec_state {
	dc_link_fec_not_ready,
	dc_link_fec_ready,

@@ -160,9 +162,6 @@ struct dc_link {

	struct panel_cntl *panel_cntl;
	struct link_encoder *link_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct hpo_dp_link_encoder *hpo_dp_link_enc;
#endif
	struct graphics_object_id link_id;
	/* Endpoint type distinguishes display endpoints which do not have entries
	 * in the BIOS connector table from those that do. Helps when tracking link

@@ -359,14 +358,17 @@ void dc_link_remove_remote_sink(

void dc_link_dp_set_drive_settings(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct link_training_settings *lt_settings);

bool dc_link_dp_perform_link_training_skip_aux(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct dc_link_settings *link_setting);

enum link_training_result dc_link_dp_perform_link_training(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct dc_link_settings *link_settings,
	bool skip_video_pattern);

@@ -374,6 +376,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link);

enum link_training_result dc_link_dp_sync_lt_attempt(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct dc_link_settings *link_setting,
	struct dc_link_training_overrides *lt_settings);

@@ -454,4 +457,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link);
uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
#endif

const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link);
/* take a snapshot of current link resource allocation state */
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
/* restore link resource allocation state from a snapshot */
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
#endif /* DC_LINK_H_ */

@@ -468,8 +468,6 @@ void dcn10_log_hw_state(struct dc *dc,
	log_mpc_crc(dc, log_ctx);

	{
		int hpo_dp_link_enc_count = 0;

		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {

@@ -500,18 +498,14 @@ void dcn10_log_hw_state(struct dc *dc,
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		for (i = 0; i < dc->link_count; i++)
			if (dc->links[i]->hpo_dp_link_enc)
				hpo_dp_link_enc_count++;

		if (hpo_dp_link_enc_count) {
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < dc->link_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
						hpo_dp_link_enc->inst,

@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
	.get_clock = dcn10_get_clock,
	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
	.calc_vupdate_position = dcn10_calc_vupdate_position,
	.power_down = dce110_power_down,
	.set_backlight_level = dce110_set_backlight_level,
	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
	.set_pipe = dce110_set_pipe,

@@ -2414,7 +2414,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
				link->hpo_dp_link_enc->inst);
				pipe_ctx->link_res.hpo_dp_link_enc->inst);
	}

	if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)

@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
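	/* The debug default changes from avoiding MPC pipe splits on
	 * multi-display configurations to letting DC split dynamically; the
	 * other DCN resource files below receive the same default.
	 */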
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,

@@ -3093,8 +3093,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
	else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
		struct dc_link *link = context->streams[0]->sink->link;
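		/* Z-state on eDP link 0 is now gated only on the computed
		 * stutter period; PSR being enabled no longer qualifies on its
		 * own.
		 */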
		if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
				|| context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
		if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
			return DCN_ZSTATE_SUPPORT_ALLOW;
		else
			return DCN_ZSTATE_SUPPORT_DISALLOW;

|
@ -134,11 +134,12 @@ void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
|
|||
PHYSICAL_ADDRESS_LOC addr;
|
||||
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
|
||||
struct dce_hwseq *hws = dc->hwseq;
|
||||
struct dc_plane_address uma = plane_state->address;
|
||||
struct dc_plane_address uma;
|
||||
|
||||
if (plane_state == NULL)
|
||||
return;
|
||||
|
||||
uma = plane_state->address;
|
||||
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
|
||||
|
||||
plane_address_in_gpu_space_to_uma(hws, &uma);
|
||||
|
|
|
@@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.pipe_split_policy = MPC_SPLIT_AVOID,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,

@@ -873,7 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.min_disp_clk_khz = 100000,
	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,

@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,

@@ -1761,17 +1761,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
	int pipe_idx = sec_pipe->pipe_idx;
	const struct resource_pool *pool = dc->res_pool;

	if (pri_pipe->plane_state) {
		/* ODM + window MPO, where MPO window is on left half only */
		if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <=
				pri_pipe->stream->src.x + pri_pipe->stream->src.width/2)
			return true;

		/* ODM + window MPO, where MPO window is on right half only */
		if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.width/2)
			return true;
	}

	*sec_pipe = *pri_pipe;

	sec_pipe->pipe_idx = pipe_idx;

@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.disable_clock_gate = true,
	.disable_pplib_clock_request = true,
	.disable_pplib_wm_range = true,
	.pipe_split_policy = MPC_SPLIT_AVOID,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,

@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,

@@ -15,7 +15,11 @@
	SR(DPPCLK_DTO_CTRL),\
	DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
	DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
	SR(REFCLK_CNTL)
	SR(REFCLK_CNTL),\
	SR(DISPCLK_FREQ_CHANGE_CNTL),\
	DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
	DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1)
|
||||
|
||||
|
||||
#define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \
|
||||
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
|
||||
|
@ -25,6 +29,18 @@
|
|||
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
|
||||
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
|
||||
DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\
|
||||
DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh)
|
||||
DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
|
||||
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
|
||||
DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
|
||||
DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
|
||||
DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\
|
||||
DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)
|
||||
|
||||
#endif //__DCN303_DCCG_H__
|
||||
|
|
|
@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.timing_trace = false,
 		.clock_trace = true,
 		.disable_pplib_clock_request = true,
-		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
 		.vsr_support = true,
@@ -499,7 +499,8 @@ static enum bp_result link_transmitter_control(
 void dcn31_hpo_dp_link_enc_enable_dp_output(
 	struct hpo_dp_link_encoder *enc,
 	const struct dc_link_settings *link_settings,
-	enum transmitter transmitter)
+	enum transmitter transmitter,
+	enum hpd_source_id hpd_source)
 {
 	struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
 	struct bp_transmitter_control cntl = { 0 };
@@ -508,6 +509,9 @@ void dcn31_hpo_dp_link_enc_enable_dp_output(
 	/* Set the transmitter */
 	enc3->base.transmitter = transmitter;
 
+	/* Set the hpd source */
+	enc3->base.hpd_source = hpd_source;
+
 	/* Enable the PHY */
 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
 	cntl.engine_id = ENGINE_ID_UNKNOWN;
@@ -184,7 +184,8 @@ void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
 void dcn31_hpo_dp_link_enc_enable_dp_output(
 	struct hpo_dp_link_encoder *enc,
 	const struct dc_link_settings *link_settings,
-	enum transmitter transmitter);
+	enum transmitter transmitter,
+	enum hpd_source_id hpd_source);
 
 void dcn31_hpo_dp_link_enc_disable_output(
 	struct hpo_dp_link_encoder *enc,
@@ -103,6 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 	.z10_restore = dcn31_z10_restore,
 	.z10_save_init = dcn31_z10_save_init,
 	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+	.optimize_pwr_state = dcn21_optimize_pwr_state,
 	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
 	.update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
@@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
 	clk_src_regs(3, D),
 	clk_src_regs(4, E)
 };
+/*pll_id being rempped in dmub, in driver it is logical instance*/
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+	clk_src_regs(0, A),
+	clk_src_regs(1, B),
+	clk_src_regs(2, F),
+	clk_src_regs(3, G),
+	clk_src_regs(4, E)
+};
 
 static const struct dce110_clk_src_shift cs_shift = {
 		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@@ -995,7 +1003,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = false,
-	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -2294,6 +2302,17 @@ static bool dcn31_resource_construct(
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL1,
 				&clk_src_regs[1], false);
+	/*move phypllx_pixclk_resync to dmub next*/
+	if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+		pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+			dcn30_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL2,
+				&clk_src_regs_b0[2], false);
+		pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+			dcn30_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL3,
+				&clk_src_regs_b0[3], false);
+	} else {
 		pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL2,
@@ -2302,6 +2321,8 @@ static bool dcn31_resource_construct(
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL3,
 				&clk_src_regs[3], false);
+	}
+
 	pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL4,
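For Yellow Carp B0 parts the PLL2/PLL3 register banks moved (to the F and G instances, pending the DMUB-side remap the comments mention), so construction selects a register table by hardware revision. A compact sketch of revision-keyed table selection; the revision constant and table contents are placeholders, not the real clk_src_regs data:

```c
#include <stdio.h>

#define YELLOW_CARP_B0 0x20 /* placeholder revision id */

struct clk_src_regs { char bank; };

static const struct clk_src_regs regs_a0[] = {
	{'A'}, {'B'}, {'C'}, {'D'}, {'E'},
};
static const struct clk_src_regs regs_b0[] = {
	{'A'}, {'B'}, {'F'}, {'G'}, {'E'}, /* PLL2/PLL3 remapped */
};

static const struct clk_src_regs *pick_regs(unsigned int rev, int pll)
{
	/* B0 parts index the remapped table for PLL2 and PLL3 only. */
	if (rev == YELLOW_CARP_B0 && (pll == 2 || pll == 3))
		return &regs_b0[pll];
	return &regs_a0[pll];
}

int main(void)
{
	printf("%c\n", pick_regs(YELLOW_CARP_B0, 2)->bank); /* F */
	printf("%c\n", pick_regs(0, 2)->bank);              /* C */
	return 0;
}
```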
@@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
 		const struct dc_init_data *init_data,
 		struct dc *dc);
 
+/*temp: B0 specific before switch to dcn313 headers*/
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL			0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX		1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL			0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX		1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT		0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT	0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT		0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT			0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT		0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK			0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK		0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK			0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK				0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK		0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT		0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT	0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT		0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT			0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT		0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK			0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK		0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK			0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK				0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK		0x00000200L
+#endif
 #endif /* _DCN31_RESOURCE_H_ */
@@ -1274,7 +1274,7 @@ static void dcn20_adjust_adaptive_sync_v_startup(
 static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
 {
 	return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
-			pipe_ctx->stream->link->hpo_dp_link_enc &&
+			pipe_ctx->link_res.hpo_dp_link_enc &&
 			dc_is_dp_signal(pipe_ctx->stream->signal));
 }
 
@@ -53,6 +53,8 @@ enum dc_status {
 	DC_NOT_SUPPORTED = 24,
 	DC_UNSUPPORTED_VALUE = 25,
 
+	DC_NO_LINK_ENC_RESOURCE = 26,
+
 	DC_ERROR_UNEXPECTED = -1
 };
 
@@ -334,6 +334,20 @@ struct plane_resource {
 	struct dcn_fe_bandwidth bw;
 };
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF
+#define LINK_RES_HPO_DP_REC_MAP__SHIFT 0
+#endif
+
+/* all mappable hardware resources used to enable a link */
+struct link_resource {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct hpo_dp_link_encoder *hpo_dp_link_enc;
+#else
+	void *dummy;
+#endif
+};
+
 union pipe_update_flags {
 	struct {
 		uint32_t enable : 1;
@@ -361,6 +375,7 @@ struct pipe_ctx {
 
 	struct plane_resource plane_res;
 	struct stream_resource stream_res;
+	struct link_resource link_res;
 
 	struct clock_source *clock_source;
 
@@ -412,6 +427,8 @@ struct resource_context {
 	struct link_enc_cfg_context link_enc_cfg_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
 	bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
+	unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
+	int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
 #endif
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	bool is_mpc_3dlut_acquired[MAX_PIPES];
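The new `struct link_resource` groups the link-scoped hardware (today just the HPO DP link encoder) into one unit that is attached to `pipe_ctx` and passed explicitly through the DP helpers in the following hunks, instead of being fished back out of `dc_link`. A toy model of the pattern (stub types, not the real DC definitions):

```c
#include <stdbool.h>
#include <stdio.h>

struct hpo_dp_link_encoder { int inst; };

/* Mirrors the shape added above: one struct owns every mappable
 * hardware block a link needs, so call sites receive it as a unit. */
struct link_resource {
	struct hpo_dp_link_encoder *hpo_dp_link_enc;
};

/* Helpers now take the resource explicitly rather than reaching
 * back into the link object (illustrative signature). */
static bool link_uses_128b_132b(const struct link_resource *link_res)
{
	return link_res->hpo_dp_link_enc != NULL;
}

int main(void)
{
	struct hpo_dp_link_encoder enc = { .inst = 0 };
	struct link_resource with_hpo = { .hpo_dp_link_enc = &enc };
	struct link_resource without = { 0 };

	printf("%d %d\n", link_uses_128b_132b(&with_hpo),
	       link_uses_128b_132b(&without)); /* 1 0 */
	return 0;
}
```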
@@ -56,16 +56,19 @@ enum {
 
 bool dp_verify_link_cap(
 	struct dc_link *link,
+	const struct link_resource *link_res,
 	struct dc_link_settings *known_limit_link_setting,
 	int *fail_count);
 
 bool dp_verify_link_cap_with_retries(
 	struct dc_link *link,
+	const struct link_resource *link_res,
 	struct dc_link_settings *known_limit_link_setting,
 	int attempts);
 
 bool dp_verify_mst_link_cap(
-	struct dc_link *link);
+	struct dc_link *link,
+	const struct link_resource *link_res);
 
 bool dp_validate_mode_timing(
 	struct dc_link *link,
@@ -168,7 +171,7 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
 	struct dc_link *link,
 	enum dc_dp_training_pattern pattern);
 
-enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);
 void dp_set_fec_enable(struct dc_link *link, bool enable);
 struct link_encoder *dp_get_link_enc(struct dc_link *link);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
@@ -211,8 +214,12 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
 struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
 		const struct dc_stream_state *stream,
 		const struct dc_link *link);
-void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings);
-void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal);
+void enable_dp_hpo_output(struct dc_link *link,
+		const struct link_resource *link_res,
+		const struct dc_link_settings *link_settings);
+void disable_dp_hpo_output(struct dc_link *link,
+		const struct link_resource *link_res,
+		enum signal_type signal);
 void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
 bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
 void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);
@@ -91,8 +91,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
  * DPIA equivalent of dc_link_dp_perfrorm_link_training.
  * Aborts link training upon detection of sink unplug.
  */
-enum link_training_result
-dc_link_dpia_perform_link_training(struct dc_link *link,
+enum link_training_result dc_link_dpia_perform_link_training(
+	struct dc_link *link,
+	const struct link_resource *link_res,
 	const struct dc_link_settings *link_setting,
 	bool skip_video_pattern);
 
@@ -268,7 +268,8 @@ struct hpo_dp_link_encoder_funcs {
 
 	void (*enable_link_phy)(struct hpo_dp_link_encoder *enc,
 		const struct dc_link_settings *link_settings,
-		enum transmitter transmitter);
+		enum transmitter transmitter,
+		enum hpd_source_id hpd_source);
 
 	void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc,
 		enum signal_type signal);
@@ -32,6 +32,7 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
 
 void dp_enable_link_phy(
 	struct dc_link *link,
+	const struct link_resource *link_res,
 	enum signal_type signal,
 	enum clock_source_id clock_source,
 	const struct dc_link_settings *link_settings);
@@ -42,22 +43,27 @@ void edp_add_delay_for_T9(struct dc_link *link);
 bool edp_receiver_ready_T9(struct dc_link *link);
 bool edp_receiver_ready_T7(struct dc_link *link);
 
-void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+		enum signal_type signal);
 
-void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+		enum signal_type signal);
 
 bool dp_set_hw_training_pattern(
 	struct dc_link *link,
+	const struct link_resource *link_res,
 	enum dc_dp_training_pattern pattern,
 	uint32_t offset);
 
 void dp_set_hw_lane_settings(
 	struct dc_link *link,
+	const struct link_resource *link_res,
 	const struct link_training_settings *link_settings,
 	uint32_t offset);
 
 void dp_set_hw_test_pattern(
 	struct dc_link *link,
+	const struct link_resource *link_res,
 	enum dp_test_pattern test_pattern,
 	uint8_t *custom_pattern,
 	uint32_t custom_pattern_size);
@@ -206,8 +206,10 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);
 int get_num_odm_splits(struct pipe_ctx *pipe);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
-		const struct resource_pool *pool);
+struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
+		const struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		const struct dc_link *link);
 #endif
 
 void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
@@ -46,10 +46,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xc99a4517
+#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 97
+#define DMUB_FW_VERSION_REVISION 98
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -852,7 +852,7 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub)
 
 enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
 {
-	if (!dmub->hw_init || dmub->hw_funcs.clear_inbox0_ack_register)
+	if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
 		return DMUB_STATUS_INVALID;
 
 	dmub->hw_funcs.clear_inbox0_ack_register(dmub);
@@ -878,7 +878,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
 enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
 		union dmub_inbox0_data_register data)
 {
-	if (!dmub->hw_init || dmub->hw_funcs.send_inbox0_cmd)
+	if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
 		return DMUB_STATUS_INVALID;
 
 	dmub->hw_funcs.send_inbox0_cmd(dmub, data);
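Both hunks above fix the same inverted guard: the old test returned DMUB_STATUS_INVALID when the hook *was* implemented and fell through to dereference it when it was NULL. The corrected shape, as a standalone sketch with stub types:

```c
#include <stdbool.h>
#include <stdio.h>

typedef void (*inbox0_hook)(void);

struct srv {
	bool hw_init;
	inbox0_hook clear_ack; /* optional hardware hook */
};

static void clear_ack_impl(void) { puts("ack cleared"); }

static int srv_clear_ack(struct srv *s)
{
	/* Bail out when not initialized OR the hook is missing;
	 * the old code omitted the second '!' and crashed here. */
	if (!s->hw_init || !s->clear_ack)
		return -1;

	s->clear_ack();
	return 0;
}

int main(void)
{
	struct srv ok = { .hw_init = true, .clear_ack = clear_ack_impl };
	struct srv no_hook = { .hw_init = true, .clear_ack = NULL };

	printf("%d\n", srv_clear_ack(&ok));      /* prints "ack cleared", then 0 */
	printf("%d\n", srv_clear_ack(&no_hook)); /* -1, no crash                 */
	return 0;
}
```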
@@ -20753,8 +20753,6 @@
 
 // addressBlock: nbio_nbif0_gdc_GDCDEC
 // base address: 0xd0000000
-#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL		0x2ffc0eda
-#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL_BASE_IDX	5
 #define regGDC1_NGDC_SDP_PORT_CTRL			0x2ffc0ee2
 #define regGDC1_NGDC_SDP_PORT_CTRL_BASE_IDX		5
 #define regGDC1_SHUB_REGS_IF_CTL			0x2ffc0ee3
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (C) 2020 Advanced Micro Devices, Inc.
  *
@@ -108541,17 +108540,6 @@
 
 
 // addressBlock: nbio_nbif0_gdc_GDCDEC
-//GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN__SHIFT	0x0
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN__SHIFT	0x1
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN__SHIFT	0x2
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN__SHIFT	0x3
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE__SHIFT	0x10
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN_MASK	0x00000001L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN_MASK	0x00000002L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN_MASK	0x00000004L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN_MASK	0x00000008L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE_MASK	0x00010000L
 //GDC1_NGDC_SDP_PORT_CTRL
 #define GDC1_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT	0x0
 #define GDC1_NGDC_SDP_PORT_CTRL__NGDC_OBFF_HW_URGENT_EARLY_WAKEUP_EN__SHIFT	0xf
@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
 	uint32_t gc_num_gl2a;
 };
 
+struct gc_info_v1_1 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_wgp0_per_sa;
+	uint32_t gc_num_wgp1_per_sa;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_gl2c;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_sa_per_se;
+	uint32_t gc_num_packer_per_sc;
+	uint32_t gc_num_gl2a;
+	uint32_t gc_num_tcp_per_sa;
+	uint32_t gc_num_sdp_interface;
+	uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_cu_per_sh;
+	uint32_t gc_num_sh_per_se;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_tccs;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_packer_per_sc;
+};
+
 typedef struct harvest_info_header {
 	uint32_t signature; /* Table Signature */
 	uint32_t version;   /* Table Version */
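With gc_info_v1_1 and gc_info_v2_0 defined alongside v1_0, an IP-discovery parser has to pick a layout from the table header before touching any field. A hedged sketch of that dispatch; the header field names here are placeholders patterned on `gpu_info_header`, and the structs are trimmed to a couple of members:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical header shape: real parsing reads gpu_info_header
 * out of the discovery binary and switches on its version. */
struct info_header { uint16_t version_major, version_minor; };

struct gc_info_v1_0 { struct info_header h; uint32_t gc_num_se; };
struct gc_info_v1_1 { struct info_header h; uint32_t gc_num_se;
		      uint32_t gc_num_tcp_per_sa; /* new in v1_1 */ };

static void parse_gc_info(const void *blob)
{
	const struct info_header *h = blob;

	switch (h->version_major) {
	case 1:
		if (h->version_minor >= 1) {
			const struct gc_info_v1_1 *gc = blob;
			printf("v1_1: se=%u tcp/sa=%u\n",
			       gc->gc_num_se, gc->gc_num_tcp_per_sa);
		} else {
			const struct gc_info_v1_0 *gc = blob;
			printf("v1_0: se=%u\n", gc->gc_num_se);
		}
		break;
	default:
		printf("unsupported gc_info v%u\n", h->version_major);
	}
}

int main(void)
{
	struct gc_info_v1_1 t = { .h = {1, 1}, .gc_num_se = 4,
				  .gc_num_tcp_per_sa = 2 };
	parse_gc_info(&t); /* prints: v1_1: se=4 tcp/sa=2 */
	return 0;
}
```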
@@ -2090,7 +2090,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(unique_id)) {
 		if (asic_type != CHIP_VEGA10 &&
 		    asic_type != CHIP_VEGA20 &&
-		    asic_type != CHIP_ARCTURUS)
+		    asic_type != CHIP_ARCTURUS &&
+		    asic_type != CHIP_ALDEBARAN)
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_features)) {
 		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
@@ -2133,6 +2134,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		}
 	}
 
+	/* setting should not be allowed from VF */
+	if (amdgpu_sriov_vf(adev)) {
+		dev_attr->attr.mode &= ~S_IWUGO;
+		dev_attr->store = NULL;
+	}
+
 #undef DEVICE_ATTR_IS
 
 	return 0;
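The second hunk strips write permission from every pm sysfs attribute when running as an SR-IOV virtual function: the mode loses S_IWUGO and the store handler is cleared, so only the host can change power settings. A tiny sketch of the same mask-and-disable idiom (stub attribute type, not the kernel's device_attribute):

```c
#include <stdio.h>

#define S_IWUGO 0222 /* write bits for user/group/other, as in the kernel */

struct attr {
	unsigned int mode;
	int (*store)(const char *buf); /* NULL => attribute is read-only */
};

static void make_read_only(struct attr *a)
{
	a->mode &= ~S_IWUGO; /* drop every write bit */
	a->store = NULL;     /* and the write handler */
}

int main(void)
{
	struct attr power_cap = { .mode = 0644, .store = NULL };

	make_read_only(&power_cap);
	printf("%04o\n", power_cap.mode); /* 0444 */
	return 0;
}
```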
@@ -102,7 +102,9 @@
 
 #define PPSMC_MSG_GfxDriverResetRecovery	0x42
 #define PPSMC_MSG_BoardPowerCalibration		0x43
-#define PPSMC_Message_Count			0x44
+#define PPSMC_MSG_HeavySBR			0x45
+#define PPSMC_Message_Count			0x46
+
 
 //PPSMC Reset Types
 #define PPSMC_RESET_TYPE_WARM_RESET		0x00
@@ -1257,9 +1257,9 @@ struct pptable_funcs {
 	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
 
 	/**
-	 * @set_light_sbr:  Set light sbr mode for the SMU.
+	 * @smu_handle_passthrough_sbr:  Send message to SMU about special handling for SBR.
 	 */
-	int (*set_light_sbr)(struct smu_context *smu, bool enable);
+	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
 
 	/**
 	 * @wait_for_event:  Wait for events from SMU.
@@ -1415,7 +1415,7 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
 
-int smu_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
 
 int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
 		       uint64_t event_arg);
@@ -229,7 +229,8 @@
 	__SMU_DUMMY_MAP(BoardPowerCalibration), \
 	__SMU_DUMMY_MAP(RequestGfxclk), \
 	__SMU_DUMMY_MAP(ForceGfxVid), \
-	__SMU_DUMMY_MAP(UnforceGfxVid),
+	__SMU_DUMMY_MAP(UnforceGfxVid), \
+	__SMU_DUMMY_MAP(HeavySBR),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
@@ -312,7 +312,7 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu,
 
 void smu_v11_0_interrupt_work(struct smu_context *smu);
 
-int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable);
 
 int smu_v11_0_restore_user_od_settings(struct smu_context *smu);
 
@@ -1569,8 +1569,6 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	/* skip CGPG when in S0ix */
-	if (smu->is_apu && !adev->in_s0ix)
-		smu_set_gfx_cgpg(&adev->smu, false);
+	smu_set_gfx_cgpg(&adev->smu, false);
 
 	return 0;
@@ -1602,7 +1600,6 @@ static int smu_resume(void *handle)
 		return ret;
 	}
 
-	if (smu->is_apu)
-		smu_set_gfx_cgpg(&adev->smu, true);
+	smu_set_gfx_cgpg(&adev->smu, true);
 
 	smu->disable_uclk_switch = 0;
@@ -3061,13 +3058,13 @@ static int smu_gfx_state_change_set(void *handle,
 	return ret;
 }
 
-int smu_set_light_sbr(struct smu_context *smu, bool enable)
+int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
-	if (smu->ppt_funcs->set_light_sbr)
-		ret = smu->ppt_funcs->set_light_sbr(smu, enable);
+	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
+		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
 	mutex_unlock(&smu->mutex);
 
 	return ret;
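The renamed smu_handle_passthrough_sbr keeps the usual SMU wrapper shape: take the context mutex, invoke the per-ASIC hook only if the pptable implements it, and return 0 otherwise. A user-space sketch of that optional-callback convention (pthread mutex standing in for smu->mutex; build with -lpthread):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct smu_context;

struct pptable_funcs {
	/* optional: only ASICs supporting SBR passthrough set this */
	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool en);
};

struct smu_context {
	pthread_mutex_t mutex;
	const struct pptable_funcs *ppt_funcs;
};

static int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	pthread_mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
	pthread_mutex_unlock(&smu->mutex);

	return ret; /* silently 0 when the ASIC lacks the hook */
}

static int aldebaran_sbr(struct smu_context *smu, bool en)
{
	(void)smu;
	printf("HeavySBR(%d) sent\n", en);
	return 0;
}

int main(void)
{
	const struct pptable_funcs alde = {
		.smu_handle_passthrough_sbr = aldebaran_sbr,
	};
	struct smu_context smu = { PTHREAD_MUTEX_INITIALIZER, &alde };

	return smu_handle_passthrough_sbr(&smu, true);
}
```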
@@ -2472,7 +2472,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.deep_sleep_control = smu_v11_0_deep_sleep_control,
 	.get_fan_parameters = arcturus_get_fan_parameters,
 	.interrupt_work = smu_v11_0_interrupt_work,
-	.set_light_sbr = smu_v11_0_set_light_sbr,
+	.smu_handle_passthrough_sbr = smu_v11_0_handle_passthrough_sbr,
 	.set_mp1_state = smu_cmn_set_mp1_state,
 };
 
@@ -1724,7 +1724,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
 	return ret;
 }
 
-int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable)
+int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
 {
 	int ret = 0;
 
@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 {
-	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+	/* Until now the SMU12 only implemented for Renoir series so here neen't do APU check. */
+	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
 		return 0;
 
 	return smu_cmn_send_smc_msg_with_param(smu,
@@ -141,6 +141,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
 	MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
 	MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
 	MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
+	MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0),
 };
 
 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -1605,6 +1606,7 @@ out_unlock:
 	mutex_unlock(&smu->metrics_lock);
 
 	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
-	sprintf(adev->serial, "%016llx", adev->unique_id);
+	if (adev->serial[0] == '\0')
+		sprintf(adev->serial, "%016llx", adev->unique_id);
 }
 
@@ -1625,7 +1627,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	return smu_cmn_send_smc_msg_with_param(smu,
 					       SMU_MSG_GmiPwrDnControl,
-					       en ? 1 : 0,
+					       en ? 0 : 1,
 					       NULL);
 }
 
@@ -1912,6 +1914,14 @@ out:
 	return ret;
 }
 
+static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
+{
+	int ret = 0;
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 1 : 0, NULL);
+
+	return ret;
+}
+
 static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 {
 #if 0
@@ -2021,6 +2031,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.get_gpu_metrics = aldebaran_get_gpu_metrics,
 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
 	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
+	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
 	.mode1_reset = aldebaran_mode1_reset,
 	.set_mp1_state = aldebaran_set_mp1_state,
 	.mode2_reset = aldebaran_mode2_reset,
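Note the argument flip in aldebaran_allow_xgmi_power_down: judging from the hunk, the firmware's GmiPwrDnControl now treats 0 as "allow XGMI power down" and 1 as "disallow", the opposite of what the old code sent. A sketch of naming such a polarity once instead of scattering ternaries (the message ID and the semantics are assumptions drawn from the hunk above):

```c
#include <stdbool.h>
#include <stdio.h>

#define SMU_MSG_GmiPwrDnControl 0x33 /* placeholder ID for the sketch */

/* Encode the firmware's expected polarity in one place. */
enum gmi_pwr_dn_arg {
	GMI_PWR_DN_ALLOW = 0,    /* en == true  */
	GMI_PWR_DN_DISALLOW = 1, /* en == false */
};

static int send_smc_msg(int msg, unsigned int arg)
{
	printf("msg 0x%02x arg %u\n", msg, arg);
	return 0;
}

static int allow_xgmi_power_down(bool en)
{
	return send_smc_msg(SMU_MSG_GmiPwrDnControl,
			    en ? GMI_PWR_DN_ALLOW : GMI_PWR_DN_DISALLOW);
}

int main(void)
{
	allow_xgmi_power_down(true);  /* msg 0x33 arg 0 */
	allow_xgmi_power_down(false); /* msg 0x33 arg 1 */
	return 0;
}
```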
@@ -212,7 +212,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 	if (smu->is_apu)
 		adev->pm.fw_version = smu_version;
 
-	switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+	switch (adev->ip_versions[MP1_HWIP][0]) {
 	case IP_VERSION(13, 0, 2):
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
 		break;
@@ -221,12 +221,17 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
 		break;
 	default:
-		dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
-			smu->adev->ip_versions[MP1_HWIP][0]);
+		dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
+			adev->ip_versions[MP1_HWIP][0]);
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
 		break;
 	}
 
+	/* only for dGPU w/ SMU13*/
+	if (adev->pm.fw)
+		dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
+			smu_version, smu_major, smu_minor, smu_debug);
+
 	/*
 	 * 1. if_version mismatch is not critical as our fw is designed
 	 * to be backward compatible.
@@ -236,11 +241,11 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 	 * of halt driver loading.
 	 */
 	if (if_version != smu->smc_driver_if_version) {
-		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
 			"smu fw version = 0x%08x (%d.%d.%d)\n",
 			smu->smc_driver_if_version, if_version,
 			smu_version, smu_major, smu_minor, smu_debug);
-		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+		dev_warn(adev->dev, "SMU driver if version not matched\n");
 	}
 
 	return ret;