Merge tag 'amd-drm-fixes-5.16-2021-11-10' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-5.16-2021-11-10:

amdgpu:
- Don't allow partial copy from user for DC debugfs
- SRIOV fixes
- GFX9 CSB pin count fix
- Various IP version check fixes
- DP 2.0 fixes
- Limit DCN1 MPO fix to DCN1

amdkfd:
- SVM fixes
- Reset fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211110222536.7527-1-alexander.deucher@amd.com
commit 951bad0bd9
@@ -297,7 +297,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
 void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
 #else
 static inline
@@ -312,7 +312,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 }
 
 static inline
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 }
 #endif
@@ -207,7 +207,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
 
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u32 domain = bo->preferred_domains;
@@ -219,6 +219,8 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 	}
 
 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
+
+	kfree(bo->kfd_bo);
 }
 
@@ -734,14 +736,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 		}
 
 		/* Add BO to VM internal data structures */
+		ret = amdgpu_bo_reserve(bo[i], false);
+		if (ret) {
+			pr_debug("Unable to reserve BO during memory attach");
+			goto unwind;
+		}
 		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		amdgpu_bo_unreserve(bo[i]);
 		if (unlikely(!attachment[i]->bo_va)) {
 			ret = -ENOMEM;
 			pr_err("Failed to add BO object to VM. ret == %d\n",
 			       ret);
 			goto unwind;
 		}
-
 		attachment[i]->va = va;
 		attachment[i]->pte_flags = get_pte_flags(adev, mem);
 		attachment[i]->adev = adev;
@@ -757,7 +764,9 @@ unwind:
 		if (!attachment[i])
 			continue;
 		if (attachment[i]->bo_va) {
+			amdgpu_bo_reserve(bo[i], true);
 			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+			amdgpu_bo_unreserve(bo[i]);
 			list_del(&attachment[i]->list);
 		}
 		if (bo[i])
@@ -1568,12 +1577,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));
 
-	ret = unreserve_bo_and_vms(&ctx, false, false);
-
 	/* Remove from VM internal data structures */
 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
 		kfd_mem_detach(entry);
 
+	ret = unreserve_bo_and_vms(&ctx, false, false);
+
 	/* Free the sync object */
 	amdgpu_sync_free(&mem->sync);
 
@@ -1600,9 +1609,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
 	if (mem->dmabuf)
 		dma_buf_put(mem->dmabuf);
-	drm_gem_object_put(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
-	kfree(mem);
+
+	/* If this releases the last reference, it will end up calling
+	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
+	 * this needs to be the last call here.
+	 */
+	drm_gem_object_put(&mem->bo->tbo.base);
 
 	return ret;
 }
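The two fixes above share one rule: dropping the last reference may run a release callback that frees the containing object, so the reference drop must be the final touch on that memory. A minimal userspace sketch of the ordering hazard (illustrative names, not the driver code):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object whose release path frees it, mirroring how dropping
 * the last GEM reference ends up in amdgpu_amdkfd_release_notify(), which
 * kfree()s the kfd_bo. */
struct obj {
	int refcount;
	int lock_alive;	/* stand-in for mem->lock */
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("last reference dropped, freeing object\n");
		free(o);	/* after this, any access to o is use-after-free */
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	o->lock_alive = 1;

	/* Correct order: tear down members first, drop the reference last. */
	o->lock_alive = 0;	/* mutex_destroy(&mem->lock) analogue */
	obj_put(o);		/* drm_gem_object_put() analogue: must be last */

	/* Calling obj_put() first and then touching o->lock_alive would be
	 * exactly the use-after-free the reordering above avoids. */
	return 0;
}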
@@ -3167,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
-#if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
-#endif
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+#if defined(CONFIG_DRM_AMD_DC_SI)
+		return amdgpu_dc > 0;
+#else
+		return false;
+#endif
 	case CHIP_BONAIRE:
 	case CHIP_KAVERI:
@@ -4283,8 +4293,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	amdgpu_amdkfd_pre_reset(adev);
-
 	/* Resume IP prior to SMC */
 	r = amdgpu_device_ip_reinit_early_sriov(adev);
 	if (r)
@@ -5020,8 +5028,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-		if (!amdgpu_sriov_vf(tmp_adev))
-			amdgpu_amdkfd_pre_reset(tmp_adev);
+		amdgpu_amdkfd_pre_reset(tmp_adev);
 
 		/*
 		 * Mark these ASICs to be reseted as untracked first
@@ -867,7 +867,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(2, 0, 2):
 	case IP_VERSION(2, 2, 0):
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+		if (!amdgpu_sriov_vf(adev))
+			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		break;
 	case IP_VERSION(2, 0, 3):
 		break;
@@ -881,6 +882,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 		break;
 	case IP_VERSION(3, 0, 0):
 	case IP_VERSION(3, 0, 16):
+	case IP_VERSION(3, 0, 64):
 	case IP_VERSION(3, 1, 1):
 	case IP_VERSION(3, 0, 2):
 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
@@ -1274,7 +1274,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	abo = ttm_to_amdgpu_bo(bo);
 
 	if (abo->kfd_bo)
-		amdgpu_amdkfd_unreserve_memory_limit(abo);
+		amdgpu_amdkfd_release_notify(abo);
 
 	/* We only remove the fence if the resv has individualized. */
 	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		adev->vcn.indirect_sram = true;
 		break;
 	case IP_VERSION(3, 0, 0):
+	case IP_VERSION(3, 0, 64):
 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
 			fw_name = FIRMWARE_SIENNA_CICHLID;
 		else
@@ -806,9 +806,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
 	for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
 				 xgmi23_pcs_err_status_reg_aldebaran[i]);
-	for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+	for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
-				 xgmi23_pcs_err_status_reg_aldebaran[i]);
+				 xgmi3x16_pcs_err_status_reg_aldebaran[i]);
 	for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
 				 walf_pcs_err_status_reg_aldebaran[i]);
@@ -8249,6 +8249,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 reg, data;
+
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	/* not for *_SOC15 */
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
@@ -8263,6 +8266,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
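The same bracketing is applied to gfx v7/v8/v9 below: the RLC_SPM register read-modify-write is wrapped in amdgpu_gfx_off_ctrl(adev, false) / amdgpu_gfx_off_ctrl(adev, true), presumably so the GFX block cannot enter GFXOFF power gating while its registers are being touched. A hedged userspace analogue of the disable/access/re-enable idiom (all names and register layout illustrative):

#include <stdio.h>

/* Userspace analogue of the bracketing these hunks add around the
 * RLC_SPM read-modify-write. Illustrative only. */
static int gfxoff_allowed = 1;

static void gfx_off_ctrl(int allow)
{
	gfxoff_allowed = allow;
	printf("GFXOFF %s\n", allow ? "allowed again" : "blocked");
}

static unsigned int mmio_read(void) { return 0x1230; }	/* fake register */
static void mmio_write(unsigned int v) { printf("write 0x%x\n", v); }

static void update_spm_vmid(unsigned int vmid)
{
	unsigned int data;

	gfx_off_ctrl(0);	/* keep the block powered across the access */

	data = mmio_read();
	data = (data & ~0xfu) | (vmid & 0xfu);	/* RMW of a VMID field */
	mmio_write(data);

	gfx_off_ctrl(1);	/* re-allow power gating afterwards */
}

int main(void)
{
	update_spm_vmid(3);
	return 0;
}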
@@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	data = RREG32(mmRLC_SPM_VMID);
 
 	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
 	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
 
 	WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
@@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
 	else
@@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
 	else
 		WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
@@ -2462,7 +2462,9 @@ static int gfx_v9_0_sw_fini(void *handle)
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v9_0_mec_fini(adev);
-	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+				&adev->gfx.rlc.clear_state_gpu_addr,
+				(void **)&adev->gfx.rlc.cs_ptr);
 	if (adev->flags & AMD_IS_APU) {
 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
 			&adev->gfx.rlc.cp_table_gpu_addr,
@@ -5102,6 +5104,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 reg, data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(reg);
@@ -5115,6 +5119,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -348,6 +348,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL,
 				    i * hub->ctx_distance, 0);
 
+	if (amdgpu_sriov_vf(adev))
+		/* Avoid write to GMC registers */
+		return;
+
 	/* Setup TLB control */
 	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
 	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
@@ -182,6 +182,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
 {
 	switch (adev->ip_versions[UVD_HWIP][0]) {
 	case IP_VERSION(3, 0, 0):
+	case IP_VERSION(3, 0, 64):
 		if (amdgpu_sriov_vf(adev)) {
 			if (encode)
 				*codecs = &sriov_sc_video_codecs_encode;
@@ -534,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v6_0_stop(adev);
+
+	return 0;
+}
+
+static int uvd_v6_0_suspend(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
 	/*
 	 * Proper cleanups before halting the HW engine:
 	 * - cancel the delayed idle work
@@ -558,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle)
 				       AMD_CG_STATE_GATE);
 	}
 
-	if (RREG32(mmUVD_STATUS) != 0)
-		uvd_v6_0_stop(adev);
-
-	return 0;
-}
-
-static int uvd_v6_0_suspend(void *handle)
-{
-	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
 	r = uvd_v6_0_hw_fini(adev);
 	if (r)
 		return r;
@@ -1430,7 +1430,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 
 	if (!dqm->sched_running)
 		return 0;
-	if (dqm->is_hws_hang)
+	if (dqm->is_hws_hang || dqm->is_resetting)
 		return -EIO;
 	if (!dqm->active_runlist)
 		return retval;
@@ -308,7 +308,7 @@
  * 16MB are reserved for kernel use (CWSR trap handler and kernel IB
  * for now).
  */
-#define SVM_USER_BASE 0x1000000ull
+#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE)
 #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
 #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
@@ -861,6 +861,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 		pr_debug("failed find process at fault address 0x%lx\n", addr);
 		return VM_FAULT_SIGBUS;
 	}
+	if (READ_ONCE(p->svms.faulting_task) == current) {
+		pr_debug("skipping ram migration\n");
+		kfd_unref_process(p);
+		return 0;
+	}
 	addr >>= PAGE_SHIFT;
 	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
@@ -766,8 +766,10 @@ struct svm_range_list {
 	struct list_head		deferred_range_list;
 	spinlock_t			deferred_list_lock;
 	atomic_t			evicted_ranges;
+	bool				drain_pagefaults;
 	struct delayed_work		restore_work;
 	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
+	struct task_struct		*faulting_task;
 };
 
 /* Process data */
@@ -1715,7 +1715,11 @@ int kfd_process_evict_queues(struct kfd_process *p)
 
 		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
 							    &pdd->qpd);
-		if (r) {
+		/* evict return -EIO if HWS is hang or asic is resetting, in this case
+		 * we would like to set all the queues to be in evicted state to prevent
+		 * them been add back since they actually not be saved right now.
+		 */
+		if (r && r != -EIO) {
 			pr_err("Failed to evict process queues\n");
 			goto fail;
 		}
@@ -1496,9 +1496,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 
 		next = min(vma->vm_end, end);
 		npages = (next - addr) >> PAGE_SHIFT;
+		WRITE_ONCE(p->svms.faulting_task, current);
 		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
 					       addr, npages, &hmm_range,
 					       readonly, true, owner);
+		WRITE_ONCE(p->svms.faulting_task, NULL);
 		if (r) {
 			pr_debug("failed %d to get svm range pages\n", r);
 			goto unreserve_out;
@@ -2000,20 +2002,28 @@ static void svm_range_deferred_list_work(struct work_struct *work)
 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
 			 prange->start, prange->last, prange->work_item.op);
 
-		/* Make sure no stale retry fault coming after range is freed */
-		if (prange->work_item.op == SVM_OP_UNMAP_RANGE)
-			svm_range_drain_retry_fault(prange->svms);
-
 		mm = prange->work_item.mm;
+retry:
 		mmap_write_lock(mm);
 		mutex_lock(&svms->lock);
 
-		/* Remove from deferred_list must be inside mmap write lock,
+		/* Checking for the need to drain retry faults must be in
+		 * mmap write lock to serialize with munmap notifiers.
+		 *
+		 * Remove from deferred_list must be inside mmap write lock,
 		 * otherwise, svm_range_list_lock_and_flush_work may hold mmap
 		 * write lock, and continue because deferred_list is empty, then
 		 * deferred_list handle is blocked by mmap write lock.
 		 */
 		spin_lock(&svms->deferred_list_lock);
+		if (unlikely(svms->drain_pagefaults)) {
+			svms->drain_pagefaults = false;
+			spin_unlock(&svms->deferred_list_lock);
+			mutex_unlock(&svms->lock);
+			mmap_write_unlock(mm);
+			svm_range_drain_retry_fault(svms);
+			goto retry;
+		}
 		list_del_init(&prange->deferred_list);
 		spin_unlock(&svms->deferred_list_lock);
 
@@ -2046,6 +2056,12 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
 		   struct mm_struct *mm, enum svm_work_list_ops op)
 {
 	spin_lock(&svms->deferred_list_lock);
+	/* Make sure pending page faults are drained in the deferred worker
+	 * before the range is freed to avoid straggler interrupts on
+	 * unmapped memory causing "phantom faults".
+	 */
+	if (op == SVM_OP_UNMAP_RANGE)
+		svms->drain_pagefaults = true;
	/* if prange is on the deferred list */
 	if (!list_empty(&prange->deferred_list)) {
 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
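Taken together, the SVM hunks implement a drain-flag handshake: the unmap path sets svms->drain_pagefaults under the deferred-list spinlock, and the worker re-checks the flag after taking its heavier locks, dropping everything to drain and then retrying from the top. A hedged pthread analogue of that retry loop (illustrative, not the kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Mutexes stand in for deferred_list_lock and the svms/mmap locks. */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t heavy_lock = PTHREAD_MUTEX_INITIALIZER;
static bool drain_pending;

static void request_drain(void)		/* svm_range_add_list_work() shape */
{
	pthread_mutex_lock(&list_lock);
	drain_pending = true;
	pthread_mutex_unlock(&list_lock);
}

static void drain(void) { printf("draining retry faults\n"); }

static void deferred_work(void)		/* svm_range_deferred_list_work() shape */
{
retry:
	pthread_mutex_lock(&heavy_lock);
	pthread_mutex_lock(&list_lock);
	if (drain_pending) {
		drain_pending = false;
		/* Drop every lock before the potentially slow drain,
		 * then start over so list state is re-validated. */
		pthread_mutex_unlock(&list_lock);
		pthread_mutex_unlock(&heavy_lock);
		drain();
		goto retry;
	}
	/* ... normal list processing under both locks ... */
	pthread_mutex_unlock(&list_lock);
	pthread_mutex_unlock(&heavy_lock);
}

int main(void)
{
	request_drain();
	deferred_work();
	return 0;
}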
@@ -217,6 +217,7 @@ static const struct drm_format_info *
 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
 
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
 
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
@@ -619,7 +620,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
 
 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
 }
-#endif
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
 
 /**
  * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
@@ -669,10 +670,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 		return;
 	}
 
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
 	link_index = notify->link_index;
-
 	link = adev->dm.dc->links[link_index];
 
 	drm_connector_list_iter_begin(dev, &iter);
@@ -685,10 +683,13 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
-	if (hpd_aconnector)
-		handle_hpd_irq_helper(hpd_aconnector);
+	if (hpd_aconnector) {
+		if (notify->type == DMUB_NOTIFICATION_HPD)
+			handle_hpd_irq_helper(hpd_aconnector);
+		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+			handle_hpd_rx_irq(hpd_aconnector);
+	}
 }
 
 /**
@@ -764,6 +765,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
 				continue;
 			}
+			if (!dm->dmub_callback[notify.type]) {
+				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+				continue;
+			}
 			if (dm->dmub_thread_offload[notify.type] == true) {
 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
 				if (!dmub_hpd_wrk) {
@@ -813,7 +818,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 	if (count > DMUB_TRACE_MAX_READ)
 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
 }
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 static int dm_set_clockgating_state(void *handle,
 		  enum amd_clockgating_state state)
@@ -1564,7 +1569,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 			goto error;
 		}
-#endif
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
+			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+			goto error;
+		}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 	}
 
 	if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -4573,7 +4582,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev,
 }
 
 
-static int fill_dc_scaling_info(const struct drm_plane_state *state,
+static int fill_dc_scaling_info(struct amdgpu_device *adev,
+				const struct drm_plane_state *state,
 				struct dc_scaling_info *scaling_info)
 {
 	int scale_w, scale_h, min_downscale, max_upscale;
@@ -4587,7 +4597,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 	/*
 	 * For reasons we don't (yet) fully understand a non-zero
 	 * src_y coordinate into an NV12 buffer can cause a
-	 * system hang. To avoid hangs (and maybe be overly cautious)
+	 * system hang on DCN1x.
+	 * To avoid hangs (and maybe be overly cautious)
 	 * let's reject both non-zero src_x and src_y.
 	 *
 	 * We currently know of only one use-case to reproduce a
@@ -4595,10 +4606,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 	 * is to gesture the YouTube Android app into full screen
 	 * on ChromeOS.
 	 */
-	if (state->fb &&
-	    state->fb->format->format == DRM_FORMAT_NV12 &&
-	    (scaling_info->src_rect.x != 0 ||
-	     scaling_info->src_rect.y != 0))
+	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+	     (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
+	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
+	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
 		return -EINVAL;
 
 	scaling_info->src_rect.width = state->src_w >> 16;
@@ -5504,7 +5515,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	int ret;
 	bool force_disable_dcc = false;
 
-	ret = fill_dc_scaling_info(plane_state, &scaling_info);
+	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
 	if (ret)
 		return ret;
 
@@ -6078,7 +6089,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
 }
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 /**
  * DOC: FreeSync Video
@@ -7567,7 +7578,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
-	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
+	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
 	if (ret)
 		return ret;
 
@@ -9015,7 +9026,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
 		}
 
-		fill_dc_scaling_info(new_plane_state,
+		fill_dc_scaling_info(dm->adev, new_plane_state,
 				     &bundle->scaling_infos[planes_count]);
 
 		bundle->surface_updates[planes_count].scaling_info =
@@ -10802,7 +10813,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 		ret = drm_atomic_add_affected_connectors(state, crtc);
 		if (ret)
-			return ret;
+			goto fail;
 
 		ret = drm_atomic_add_affected_planes(state, crtc);
 		if (ret)
@@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
 
 	wr_buf_ptr = wr_buf;
 
-	r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
-
-	/* r is bytes not be copied */
-	if (r >= wr_buf_size) {
-		DRM_DEBUG_DRIVER("user data not be read\n");
-		return -EINVAL;
+	/* r is bytes not be copied */
+	if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) {
+		DRM_DEBUG_DRIVER("user data could not be read successfully\n");
+		return -EFAULT;
 	}
 
 	/* check number of parameters. isspace could not differ space and \n */
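The debugfs fix hinges on copy_from_user()'s contract: it returns the number of bytes left uncopied, never a negative errno, so the old `r >= wr_buf_size` test accepted partial copies and returned the wrong error code. A small userspace mock of the same contract and the corrected check (illustrative stand-ins for the kernel API, not the actual debugfs entry point):

#include <stdio.h>
#include <string.h>

/* Mock with copy_from_user()'s contract: returns the number of bytes NOT
 * copied, 0 on full success. 'faulting' simulates a fault partway through. */
static unsigned long mock_copy_from_user(void *dst, const void *src,
					 unsigned long n, unsigned long faulting)
{
	unsigned long copied = n > faulting ? n - faulting : 0;

	memcpy(dst, src, copied);
	return n - copied;	/* bytes left uncopied */
}

static int parse_write(char *wr_buf, unsigned long wr_buf_size,
		       const char *ubuf, unsigned long faulting)
{
	/* Any nonzero return means at least one byte was not copied:
	 * reject the whole write instead of parsing a partial buffer. */
	if (mock_copy_from_user(wr_buf, ubuf, wr_buf_size, faulting))
		return -14;	/* -EFAULT */

	return 0;
}

int main(void)
{
	char buf[8];

	printf("full copy:    %d\n", parse_write(buf, sizeof(buf), "01234567", 0));
	printf("partial copy: %d\n", parse_write(buf, sizeof(buf), "01234567", 3));
	return 0;
}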
@@ -1892,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
 	return false;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
 /* Perform updates here which need to be deferred until next vupdate
 *
 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
@@ -1901,7 +1902,6 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
 */
 static void process_deferred_updates(struct dc *dc)
 {
-#ifdef CONFIG_DRM_AMD_DC_DCN
 	int i = 0;
 
 	if (dc->debug.enable_mem_low_power.bits.cm) {
@@ -1910,8 +1910,8 @@ static void process_deferred_updates(struct dc *dc)
 			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
 				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
 	}
-#endif
 }
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 void dc_post_update_surfaces_to_stream(struct dc *dc)
 {
@@ -1938,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
 	}
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	process_deferred_updates(dc);
+#endif
 
 	dc->hwss.optimize_bandwidth(dc, context);
@@ -4770,7 +4770,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 						   timing->dsc_cfg.bits_per_pixel,
 						   timing->dsc_cfg.num_slices_h,
 						   timing->dsc_cfg.is_dp);
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 	switch (timing->display_color_depth) {
 	case COLOR_DEPTH_666:
@@ -1024,6 +1024,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t min_freq, max_freq = 0;
 	uint32_t ret = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1065,7 +1067,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (ret)
 			return ret;
 
-		size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
 			(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
 		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -1081,7 +1083,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (ret)
 			return ret;
 
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 		size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
 			min_freq, max_freq);
 	}
@@ -1456,6 +1458,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
 			title[1], title[2], title[3], title[4], title[5]);
@@ -4914,6 +4914,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int size = 0;
 	uint32_t i, now, clock, pcie_speed;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4963,7 +4965,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
 					i, odn_sclk_table->entries[i].clock/100,
@@ -4972,7 +4974,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
 			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
 					i, odn_mclk_table->entries[i].clock/100,
@@ -4981,7 +4983,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_RANGE:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
@@ -5518,6 +5520,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
 			title[0], title[1], title[2], title[3],
 			title[4], title[5], title[6], title[7]);
@@ -1550,6 +1550,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t i, now;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev,
 			struct amdgpu_irq_src *source,
 			struct amdgpu_iv_entry *entry);
 
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf to
+ * the current page boundary and record the offset.
+ */
+static inline void phm_get_sysfs_buf(char **buf, int *offset)
+{
+	if (!*buf || !offset)
+		return;
+
+	*offset = offset_in_page(*buf);
+	*buf -= *offset;
+}
+
 int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
 
 void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
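sysfs hands ->show() a page-sized, page-aligned buffer, and sysfs_emit()/sysfs_emit_at() sanity-check that alignment; because the powerplay paths advance buf as they print, the new helper rebases the pointer to the page start and carries the offset in size. A sketch of the same arithmetic in plain C (PAGE_SIZE assumed 4096; names are stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ul

/* Userspace stand-in for offset_in_page(): low bits of the address. */
static unsigned long offset_in_page(const void *p)
{
	return (uintptr_t)p & (PAGE_SIZE - 1);
}

/* Same move as phm_get_sysfs_buf(): step buf back to its page start and
 * remember how far in we were, so a later sysfs_emit_at(buf, size, ...)
 * writes at the original position while seeing an aligned base pointer. */
static void get_sysfs_buf(char **buf, int *offset)
{
	if (!*buf || !offset)
		return;

	*offset = offset_in_page(*buf);
	*buf -= *offset;
}

int main(void)
{
	static char page[PAGE_SIZE] __attribute__((aligned(4096)));
	char *buf = page + 100;		/* caller-advanced pointer */
	int size = 0;

	get_sysfs_buf(&buf, &size);
	printf("base is page-aligned: %d, carried offset: %d\n",
	       offset_in_page(buf) == 0, size);
	return 0;
}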
@@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 	int ret = 0;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
 	PP_ASSERT_WITH_CODE(!ret,
 		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -4637,6 +4639,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	int i, now, size = 0, count = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		if (data->registry_data.sclk_dpm_key_disabled)
@@ -4717,7 +4721,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
 			for (i = 0; i < podn_vdd_dep->count; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
@@ -4727,7 +4731,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
 			for (i = 0; i < podn_vdd_dep->count; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
@@ -4737,7 +4741,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_RANGE:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
@@ -5112,6 +5116,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
 			title[1], title[2], title[3], title[4], title[5]);
@@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 	int ret = 0;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
 	PP_ASSERT_WITH_CODE(!ret,
 		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -2244,6 +2246,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int i, now, size = 0;
 	struct pp_clock_levels_with_latency clocks;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		PP_ASSERT_WITH_CODE(
@@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 	int ret = 0;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
 	PP_ASSERT_WITH_CODE(!ret,
 		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -3364,6 +3366,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int ret = 0;
 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3479,7 +3483,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	case OD_SCLK:
 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
 				od_table->GfxclkFmin);
 			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -3489,7 +3493,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	case OD_MCLK:
 		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
 			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
 				od_table->UclkFmax);
 		}
@@ -3503,7 +3507,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-			size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
 			size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
 				od_table->GfxclkFreq1,
 				od_table->GfxclkVolt1 / VOLTAGE_SCALE);
@@ -3518,7 +3522,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 
 	case OD_RANGE:
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
@@ -4003,6 +4007,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
 		title[0], title[1], title[2], title[3], title[4], title[5],
 		title[6], title[7], title[8], title[9], title[10]);
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 			dev_err(adev->dev, "Failed to disable smu features.\n");
 	}
 
-	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) &&
+	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
 	    adev->gfx.rlc.funcs->stop)
 		adev->gfx.rlc.funcs->stop(adev);
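The last hunk swaps an MP1 firmware-version check for a GC one; both forms work because amdgpu packs each IP block's version into a single integer that compares numerically. A small demo assuming the driver's major/minor/revision byte packing (macro shape reproduced here for illustration):

#include <stdio.h>

/* amdgpu packs an IP block's version into one integer so version ranges
 * can be compared numerically; this mirrors the driver's macro shape. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	unsigned int gc = IP_VERSION(10, 3, 0);	/* e.g. a GFX10.3 part */

	/* The check from the hunk above: any GFX10+ part takes the branch. */
	if (gc >= IP_VERSION(10, 0, 0))
		printf("GC %u.%u.%u: stop RLC before disabling DPM\n",
		       gc >> 16, (gc >> 8) & 0xff, gc & 0xff);
	return 0;
}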