Merge tag 'amd-drm-fixes-5.12-2021-03-31' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-5.12-2021-03-31:

amdgpu:
- Polaris idle power fix
- VM fix
- Vangogh S3 fix
- Fixes for non-4K page sizes

amdkfd:
- dqm fence memory corruption fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210401020057.17831-1-alexander.deucher@amd.com
This commit is contained in:
commit dcdb7aa452
@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
 			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
 		}
-		dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-		dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info->cu_active_number = adev->gfx.cu_info.number;
 		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
 		dev_info->ce_ram_size = adev->gfx.ce_ram_size;

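Note on the hunk above: dev_info tells userspace how to align GPU VA allocations, and on kernels built with CPU pages larger than 4K the reported alignment and GART page size must be at least PAGE_SIZE. A minimal standalone sketch of the max the patch computes with the kernel's max_t(u32, ...), using an illustrative 16K CPU page size (not kernel code):

    /* Standalone sketch, not kernel code: pick the stricter of the CPU and
     * GPU page sizes. Page-size values here are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define CPU_PAGE_SIZE  16384u /* e.g. a 16K-page arm64 config */
    #define GPU_PAGE_SIZE   4096u /* AMDGPU_GPU_PAGE_SIZE is 4K */

    static uint32_t max_u32(uint32_t a, uint32_t b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        /* Userspace must align GPU VA and GART allocations to this. */
        printf("alignment = %u\n", max_u32(CPU_PAGE_SIZE, GPU_PAGE_SIZE));
        return 0;
    }
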
@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	uint64_t eaddr;
 
 	/* validate the parameters */
-	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+	    size == 0 || size & ~PAGE_MASK)
 		return -EINVAL;
 
 	/* make sure object fit at this offset */

@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	int r;
 
 	/* validate the parameters */
-	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+	    size == 0 || size & ~PAGE_MASK)
 		return -EINVAL;
 
 	/* make sure object fit at this offset */

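One subtlety in the two VM hunks above: the masks have opposite polarity. AMDGPU_GPU_PAGE_MASK is a low-bits mask (AMDGPU_GPU_PAGE_SIZE - 1), while the kernel's PAGE_MASK has the high bits set (~(PAGE_SIZE - 1)), which is why the CPU-page test is written `addr & ~PAGE_MASK`. A small sketch under those definitions, with an illustrative 16K CPU page:

    #include <assert.h>
    #include <stdint.h>

    /* Definitions mirror the kernel's; the CPU page size is illustrative. */
    #define PAGE_SIZE            16384ULL
    #define PAGE_MASK            (~(PAGE_SIZE - 1))         /* high bits set */
    #define AMDGPU_GPU_PAGE_SIZE 4096ULL
    #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) /* low bits set */

    int main(void)
    {
        uint64_t saddr = 3 * AMDGPU_GPU_PAGE_SIZE; /* 12K */

        /* Old test passes: 12K is GPU-page (4K) aligned... */
        assert((saddr & AMDGPU_GPU_PAGE_MASK) == 0);
        /* ...but the new test rejects it: mappings are backed by CPU pages,
         * and 12K is not 16K-aligned. */
        assert((saddr & ~PAGE_MASK) != 0);
        return 0;
    }
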
@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		after->start = eaddr + 1;
 		after->last = tmp->last;
 		after->offset = tmp->offset;
-		after->offset += after->start - tmp->start;
+		after->offset += (after->start - tmp->start) << PAGE_SHIFT;
 		after->flags = tmp->flags;
 		after->bo_va = tmp->bo_va;
 		list_add(&after->list, &tmp->bo_va->invalids);

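The one-liner above fixes a unit mismatch when a mapping is split: the mapping's start/last are page numbers, but the offset into the BO is in bytes, so the page delta has to be scaled by PAGE_SHIFT before it is added. A worked sketch with illustrative numbers (4K pages):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* 4K pages, illustrative */

    int main(void)
    {
        uint64_t tmp_start = 100;   /* first page of the original mapping */
        uint64_t after_start = 164; /* first page kept after the cleared range */
        uint64_t tmp_offset = 0;    /* byte offset into the BO */

        /* Buggy: adds a page count (64) to a byte offset. */
        uint64_t wrong = tmp_offset + (after_start - tmp_start);
        /* Fixed: converts 64 pages to 64 << 12 = 262144 bytes first. */
        uint64_t right = tmp_offset + ((after_start - tmp_start) << PAGE_SHIFT);

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
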
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
 	/* Wait till CP writes sync code: */
 	status = amdkfd_fence_wait_timeout(
-			(unsigned int *) rm_state,
+			rm_state,
 			QUEUESTATE__ACTIVE, 1500);
 
 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);

@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	if (retval)
 		goto fail_allocate_vidmem;
 
-	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
 	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
 	init_interrupts(dqm);

@@ -1340,8 +1340,8 @@ out:
 	return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-				unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+				uint64_t fence_value,
 				unsigned int timeout_ms)
 {
 	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

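The fence hunks here and below widen the dqm fence from 32 to 64 bits end to end: the storage in struct device_queue_manager, the wait helper, and the PM4 query-status packet writers. The firmware writes a full 64-bit value, so reading the slot through an unsigned int pointer let the write spill past the allocation. A minimal userspace-style sketch of the polling side, with simplified timeout handling:

    #include <stdbool.h>
    #include <stdint.h>

    /* Spin until the expected 64-bit value appears in the fence slot, or the
     * (simplified) retry budget runs out. volatile stands in for the real
     * code's read of device-written memory. */
    static bool fence_wait(const volatile uint64_t *fence_addr,
                           uint64_t fence_value, unsigned long retries)
    {
        while (retries--) {
            if (*fence_addr == fence_value)
                return true;
        }
        return false;
    }

    int main(void)
    {
        uint64_t slot = 42; /* pretend the firmware already wrote 42 */
        return fence_wait(&slot, 42, 1000) ? 0 : 1;
    }
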
@@ -192,7 +192,7 @@ struct device_queue_manager {
 	uint16_t vmid_pasid[VMID_NUM];
 	uint64_t pipelines_addr;
 	uint64_t fence_gpu_addr;
-	unsigned int *fence_addr;
+	uint64_t *fence_addr;
 	struct kfd_mem_obj *fence_mem;
 	bool active_runlist;
 	int sched_policy;

@@ -347,7 +347,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-			uint32_t fence_value)
+			uint64_t fence_value)
 {
 	uint32_t *buffer, size;
 	int retval = 0;

@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address, uint32_t fence_value)
+			uint64_t fence_address, uint64_t fence_value)
 {
 	struct pm4_mes_query_status *packet;
 

@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address, uint32_t fence_value)
+			uint64_t fence_address, uint64_t fence_value)
 {
 	struct pm4_mes_query_status *packet;
 

@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
 		       u32 *ctl_stack_used_size,
 		       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-			      unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+			      uint64_t fence_value,
 			      unsigned int timeout_ms);
 
 /* Packet Manager */

@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
 			    uint32_t filter_param, bool reset,
 			    unsigned int sdma_engine);
 	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-			    uint64_t fence_address, uint32_t fence_value);
+			    uint64_t fence_address, uint64_t fence_value);
 	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
 	/* Packet sizes */

@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 			  struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-			 uint32_t fence_value);
+			 uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 			enum kfd_unmap_queues_filter mode,

@@ -3330,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
 	disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
 						!hwmgr->display_config->multi_monitor_in_sync) ||
-						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+						(hwmgr->display_config->num_display &&
+						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
 	disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
 				disable_mclk_switching_for_display;

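The smu7 hunk above adjusts the Polaris idle policy: previously the vblank-too-short term could force the memory clock to stay high even with zero displays connected. A compact sketch of the adjusted predicate (field names simplified):

    #include <stdbool.h>

    /* MCLK switching is disabled for unsynced multi-monitor setups, or when
     * at least one display has a vblank too short to hide the switch. With
     * num_display == 0 both terms are false, so MCLK may idle down. */
    static bool disable_mclk_switching_for_display(unsigned int num_display,
                                                   bool multi_monitor_in_sync,
                                                   bool vblank_too_short)
    {
        return (num_display > 1 && !multi_monitor_in_sync) ||
               (num_display > 0 && vblank_too_short);
    }

    int main(void)
    {
        /* Headless: previously this could still report true via the
         * vblank term; now it is false. */
        return disable_mclk_switching_for_display(0, false, true) ? 1 : 0;
    }
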
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 	uint32_t feature_mask[2];
 	uint64_t feature_enabled;
 
+	/* we need to re-init after suspend so return false */
+	if (adev->in_suspend)
+		return false;
+
 	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
 	if (ret)

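The Vangogh S3 fix above is an early-return guard: during suspend the SMU feature mask is stale and DPM has to be re-initialized on resume, so is_dpm_running must not report true. A reduced sketch of the pattern (types simplified):

    #include <stdbool.h>
    #include <stdint.h>

    struct dev_state {
        bool in_suspend;
        uint64_t feature_enabled; /* stand-in for the queried feature mask */
    };

    /* While suspended, report "not running" instead of trusting state that
     * will be re-initialized on resume. */
    static bool is_dpm_running(const struct dev_state *dev)
    {
        if (dev->in_suspend)
            return false;

        return dev->feature_enabled != 0;
    }

    int main(void)
    {
        struct dev_state dev = { .in_suspend = true, .feature_enabled = 1 };
        return is_dpm_running(&dev) ? 1 : 0; /* 0: suspend overrides */
    }
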