drm/amdkfd: dqm fence memory corruption
The amdgpu driver uses a 4-byte data type for the DQM fence memory and passes the GPU address of that fence memory to the microcode through the query status PM4 message. The query status PM4 packet definition and the microcode, however, both handle the fence as 8 bytes. Since only 4 bytes of fence memory are allocated while the microcode writes 8 bytes, the extra 4 bytes land in adjacent memory and corrupt it.

Changes since v1:
* Change dqm->fence_addr to a u64 pointer to fix the issue, and update query_status and amdkfd_fence_wait_timeout to use a 64-bit fence value so they stay consistent.

Signed-off-by: Qu Huang <jinsdb@126.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
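To make the failure mode concrete, here is a minimal user-space sketch (not the driver code) of the bug pattern: an 8-byte store performed at an address that was allocated for only a 4-byte value clobbers whatever happens to sit next to it.

/*
 * Illustrative sketch only: what happens when firmware writes a 64-bit
 * fence value into a slot that was allocated as 32 bits. The field placed
 * right after the fence is silently overwritten.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fence_area {
	uint32_t fence;     /* only 4 bytes were allocated for the fence */
	uint32_t neighbour; /* whatever happens to live next in memory */
};

int main(void)
{
	struct fence_area a = { .fence = 0, .neighbour = 0xCAFEBABE };
	uint64_t fence_value = 0x100000002ULL; /* 8-byte value written by the CP */

	/* The microcode performs an 8-byte store at the fence address. */
	memcpy(&a.fence, &fence_value, sizeof(fence_value));

	printf("fence     = 0x%08x\n", a.fence);     /* low 32 bits */
	printf("neighbour = 0x%08x\n", a.neighbour); /* clobbered by high 32 bits */
	return 0;
}

On a little-endian machine this prints fence = 0x00000002 and neighbour = 0x00000001: the upper half of the 64-bit write landed in the neighbouring field, which is exactly the corruption the patch removes by allocating and typing the fence as 64 bits.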
commit e92049ae45
parent 5e61b84f9d
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
 	/* Wait till CP writes sync code: */
 	status = amdkfd_fence_wait_timeout(
-			(unsigned int *) rm_state,
+			rm_state,
 			QUEUESTATE__ACTIVE, 1500);
 
 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);
@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	if (retval)
 		goto fail_allocate_vidmem;
 
-	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
 	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
 	init_interrupts(dqm);
@@ -1340,8 +1340,8 @@ out:
 	return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-				unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+				uint64_t fence_value,
 				unsigned int timeout_ms)
 {
 	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
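For context, amdkfd_fence_wait_timeout polls the fence location until it holds the expected value or the timeout expires; below is a hedged user-space approximation of that pattern (the in-tree helper uses jiffies and schedule(), and its details may differ). It shows why the pointer now has to be dereferenced as a full 64-bit value once the CP writes 8 bytes.

/*
 * User-space approximation of a 64-bit fence wait: poll the fence
 * location until it reaches the expected value or the timeout expires.
 * Names and timing are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Wait until *fence_addr == fence_value, or give up after timeout_ms. */
static int fence_wait_timeout(volatile uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms)
{
	int64_t deadline = now_ms() + timeout_ms;

	while (*fence_addr != fence_value) {
		if (now_ms() > deadline)
			return -ETIME; /* mirrors the kernel's -ETIME convention */
		/* the kernel helper would call schedule() here; we just spin */
	}
	return 0;
}

int main(void)
{
	uint64_t fence = 0;
	int ret;

	/* Nobody signals the fence here, so the wait times out. */
	ret = fence_wait_timeout(&fence, 1, 10);
	printf("wait returned %d (%s)\n", ret, ret == -ETIME ? "timeout" : "ok");
	return 0;
}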
@@ -192,7 +192,7 @@ struct device_queue_manager {
 	uint16_t		vmid_pasid[VMID_NUM];
 	uint64_t		pipelines_addr;
 	uint64_t		fence_gpu_addr;
-	unsigned int		*fence_addr;
+	uint64_t		*fence_addr;
 	struct kfd_mem_obj	*fence_mem;
 	bool			active_runlist;
 	int			sched_policy;
@@ -347,7 +347,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-			uint32_t fence_value)
+			uint64_t fence_value)
 {
 	uint32_t *buffer, size;
 	int retval = 0;
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address, uint32_t fence_value)
+			uint64_t fence_address, uint64_t fence_value)
 {
 	struct pm4_mes_query_status *packet;
 
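PM4 packets are assembled from 32-bit dwords, so once the fence value is 64-bit it has to be split into low/high halves just like the fence address. The sketch below illustrates that split with a made-up struct; the real pm4_mes_query_status layout and field names in the kernel headers may differ.

/*
 * Sketch only: splitting a 64-bit fence address and fence value into the
 * 32-bit lo/hi dwords of a query-status style packet. The struct is
 * hypothetical, not the real pm4_mes_query_status definition.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct query_status_example {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t data_lo;
	uint32_t data_hi;
};

static void fill_query_status(struct query_status_example *pkt,
			      uint64_t fence_address, uint64_t fence_value)
{
	pkt->addr_lo = (uint32_t)fence_address;         /* lower_32_bits() in kernel code */
	pkt->addr_hi = (uint32_t)(fence_address >> 32); /* upper_32_bits() */
	pkt->data_lo = (uint32_t)fence_value;
	pkt->data_hi = (uint32_t)(fence_value >> 32);
}

int main(void)
{
	struct query_status_example pkt;

	fill_query_status(&pkt, 0x0000001234567000ULL, 0x8765432100000001ULL);
	printf("addr %08" PRIx32 ":%08" PRIx32 " data %08" PRIx32 ":%08" PRIx32 "\n",
	       pkt.addr_hi, pkt.addr_lo, pkt.data_hi, pkt.data_lo);
	return 0;
}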
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address, uint32_t fence_value)
+			uint64_t fence_address, uint64_t fence_value)
 {
 	struct pm4_mes_query_status *packet;
 
@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
 		       u32 *ctl_stack_used_size,
 		       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-			      unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+			      uint64_t fence_value,
 			      unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine);
 	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address, uint32_t fence_value);
+			uint64_t fence_address, uint64_t fence_value);
 	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
 	/* Packet sizes */
@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 			struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-			uint32_t fence_value);
+			uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 			enum kfd_unmap_queues_filter mode,