drm/amdkfd: prepare map process for single process debug devices
Older HW only supports debugging on a single process because the SPI debug mode setting registers are device global. The HWS has supplied a single pinned VMID (0xf) for MAP_PROCESS for debug purposes. To pin the VMID, the KFD will remove the VMID from the HWS dynamic VMID allocation via SET_RESOURCES so that a debugged process will never migrate away from its pinned VMID. The KFD is responsible for reserving and releasing this pinned VMID accordingly whenever the debugger attaches and detaches respectively. Signed-off-by: Jonathan Kim <jonathan.kim@amd.com> Reviewed-by: Felix Kuehling <felix.kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
7cee6a6824
commit
97ae3c8cce
|
@ -1525,6 +1525,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
|
||||||
dqm->gws_queue_count = 0;
|
dqm->gws_queue_count = 0;
|
||||||
dqm->active_runlist = false;
|
dqm->active_runlist = false;
|
||||||
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
|
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
|
||||||
|
dqm->trap_debug_vmid = 0;
|
||||||
|
|
||||||
init_sdma_bitmaps(dqm);
|
init_sdma_bitmaps(dqm);
|
||||||
|
|
||||||
|
@ -2501,6 +2502,98 @@ static void kfd_process_hw_exception(struct work_struct *work)
|
||||||
amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
|
amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
|
||||||
|
struct qcm_process_device *qpd)
|
||||||
|
{
|
||||||
|
int r;
|
||||||
|
int updated_vmid_mask;
|
||||||
|
|
||||||
|
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
|
||||||
|
pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
dqm_lock(dqm);
|
||||||
|
|
||||||
|
if (dqm->trap_debug_vmid != 0) {
|
||||||
|
pr_err("Trap debug id already reserved\n");
|
||||||
|
r = -EBUSY;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
|
||||||
|
USE_DEFAULT_GRACE_PERIOD, false);
|
||||||
|
if (r)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
|
||||||
|
updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
|
||||||
|
|
||||||
|
dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
|
||||||
|
dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
|
||||||
|
r = set_sched_resources(dqm);
|
||||||
|
if (r)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
r = map_queues_cpsch(dqm);
|
||||||
|
if (r)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
|
||||||
|
|
||||||
|
out_unlock:
|
||||||
|
dqm_unlock(dqm);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Releases vmid for the trap debugger
|
||||||
|
*/
|
||||||
|
int release_debug_trap_vmid(struct device_queue_manager *dqm,
|
||||||
|
struct qcm_process_device *qpd)
|
||||||
|
{
|
||||||
|
int r;
|
||||||
|
int updated_vmid_mask;
|
||||||
|
uint32_t trap_debug_vmid;
|
||||||
|
|
||||||
|
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
|
||||||
|
pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
dqm_lock(dqm);
|
||||||
|
trap_debug_vmid = dqm->trap_debug_vmid;
|
||||||
|
if (dqm->trap_debug_vmid == 0) {
|
||||||
|
pr_err("Trap debug id is not reserved\n");
|
||||||
|
r = -EINVAL;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
|
||||||
|
USE_DEFAULT_GRACE_PERIOD, false);
|
||||||
|
if (r)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
|
||||||
|
updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
|
||||||
|
|
||||||
|
dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
|
||||||
|
dqm->trap_debug_vmid = 0;
|
||||||
|
r = set_sched_resources(dqm);
|
||||||
|
if (r)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
r = map_queues_cpsch(dqm);
|
||||||
|
if (r)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
|
||||||
|
|
||||||
|
out_unlock:
|
||||||
|
dqm_unlock(dqm);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
#if defined(CONFIG_DEBUG_FS)
|
#if defined(CONFIG_DEBUG_FS)
|
||||||
|
|
||||||
static void seq_reg_dump(struct seq_file *m,
|
static void seq_reg_dump(struct seq_file *m,
|
||||||
|
|
|
@ -250,6 +250,7 @@ struct device_queue_manager {
|
||||||
struct kfd_mem_obj *fence_mem;
|
struct kfd_mem_obj *fence_mem;
|
||||||
bool active_runlist;
|
bool active_runlist;
|
||||||
int sched_policy;
|
int sched_policy;
|
||||||
|
uint32_t trap_debug_vmid;
|
||||||
|
|
||||||
/* hw exception */
|
/* hw exception */
|
||||||
bool is_hws_hang;
|
bool is_hws_hang;
|
||||||
|
@ -285,6 +286,10 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
|
||||||
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
|
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
|
||||||
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
|
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
|
||||||
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
|
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
|
||||||
|
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
|
||||||
|
struct qcm_process_device *qpd);
|
||||||
|
int release_debug_trap_vmid(struct device_queue_manager *dqm,
|
||||||
|
struct qcm_process_device *qpd);
|
||||||
|
|
||||||
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
|
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
|
||||||
{
|
{
|
||||||
|
|
|
@ -34,6 +34,9 @@ static int pm_map_process_v9(struct packet_manager *pm,
|
||||||
{
|
{
|
||||||
struct pm4_mes_map_process *packet;
|
struct pm4_mes_map_process *packet;
|
||||||
uint64_t vm_page_table_base_addr = qpd->page_table_base;
|
uint64_t vm_page_table_base_addr = qpd->page_table_base;
|
||||||
|
struct kfd_node *kfd = pm->dqm->dev;
|
||||||
|
struct kfd_process_device *pdd =
|
||||||
|
container_of(qpd, struct kfd_process_device, qpd);
|
||||||
|
|
||||||
packet = (struct pm4_mes_map_process *)buffer;
|
packet = (struct pm4_mes_map_process *)buffer;
|
||||||
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
|
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
|
||||||
|
@ -49,6 +52,12 @@ static int pm_map_process_v9(struct packet_manager *pm,
|
||||||
packet->bitfields14.sdma_enable = 1;
|
packet->bitfields14.sdma_enable = 1;
|
||||||
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
|
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
|
||||||
|
|
||||||
|
if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&
|
||||||
|
pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
|
||||||
|
packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
|
||||||
|
packet->bitfields2.new_debug = 1;
|
||||||
|
}
|
||||||
|
|
||||||
packet->sh_mem_config = qpd->sh_mem_config;
|
packet->sh_mem_config = qpd->sh_mem_config;
|
||||||
packet->sh_mem_bases = qpd->sh_mem_bases;
|
packet->sh_mem_bases = qpd->sh_mem_bases;
|
||||||
if (qpd->tba_addr) {
|
if (qpd->tba_addr) {
|
||||||
|
|
|
@ -146,7 +146,10 @@ struct pm4_mes_map_process {
|
||||||
union {
|
union {
|
||||||
struct {
|
struct {
|
||||||
uint32_t pasid:16;
|
uint32_t pasid:16;
|
||||||
uint32_t reserved1:8;
|
uint32_t reserved1:2;
|
||||||
|
uint32_t debug_vmid:4;
|
||||||
|
uint32_t new_debug:1;
|
||||||
|
uint32_t reserved2:1;
|
||||||
uint32_t diq_enable:1;
|
uint32_t diq_enable:1;
|
||||||
uint32_t process_quantum:7;
|
uint32_t process_quantum:7;
|
||||||
} bitfields2;
|
} bitfields2;
|
||||||
|
|
Loading…
Reference in New Issue