drm/amdkfd: Avoid ambiguity by indicating it's cp queue

The queues represented in queue_bitmap are only CP queues, so rename the
field to cp_queue_bitmap (and get_queues_num() to get_cp_queues_num())
to make that explicit.

Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
81b820b304
commit
e694530418
|
@ -126,7 +126,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
|
|||
/* this is going to have a few of the MSBs set that we need to
|
||||
* clear
|
||||
*/
|
||||
bitmap_complement(gpu_resources.queue_bitmap,
|
||||
bitmap_complement(gpu_resources.cp_queue_bitmap,
|
||||
adev->gfx.mec.queue_bitmap,
|
||||
KGD_MAX_QUEUES);
|
||||
|
||||
|
@ -137,7 +137,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
|
|||
* adev->gfx.mec.num_pipe_per_mec
|
||||
* adev->gfx.mec.num_queue_per_pipe;
|
||||
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
|
||||
clear_bit(i, gpu_resources.queue_bitmap);
|
||||
clear_bit(i, gpu_resources.cp_queue_bitmap);
|
||||
|
||||
amdgpu_doorbell_get_kfd_info(adev,
|
||||
&gpu_resources.doorbell_physical_address,
|
||||
|
|
|
@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
|
|||
/* queue is available for KFD usage if bit is 1 */
|
||||
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
|
||||
if (test_bit(pipe_offset + i,
|
||||
dqm->dev->shared_resources.queue_bitmap))
|
||||
dqm->dev->shared_resources.cp_queue_bitmap))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
unsigned int get_queues_num(struct device_queue_manager *dqm)
|
||||
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
|
||||
{
|
||||
return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
|
||||
return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
|
||||
KGD_MAX_QUEUES);
|
||||
}
|
||||
|
||||
|
@ -908,7 +908,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
|
|||
|
||||
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
|
||||
if (test_bit(pipe_offset + queue,
|
||||
dqm->dev->shared_resources.queue_bitmap))
|
||||
dqm->dev->shared_resources.cp_queue_bitmap))
|
||||
dqm->allocated_queues[pipe] |= 1 << queue;
|
||||
}
|
||||
|
||||
|
@ -1029,7 +1029,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
|
|||
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
|
||||
/ dqm->dev->shared_resources.num_pipe_per_mec;
|
||||
|
||||
if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
|
||||
if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
|
||||
continue;
|
||||
|
||||
/* only acquire queues from the first MEC */
|
||||
|
@ -1979,7 +1979,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
|
|||
|
||||
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
|
||||
if (!test_bit(pipe_offset + queue,
|
||||
dqm->dev->shared_resources.queue_bitmap))
|
||||
dqm->dev->shared_resources.cp_queue_bitmap))
|
||||
continue;
|
||||
|
||||
r = dqm->dev->kfd2kgd->hqd_dump(
|
||||
|
|
|
@ -219,7 +219,7 @@ void device_queue_manager_init_v10_navi10(
|
|||
struct device_queue_manager_asic_ops *asic_ops);
|
||||
void program_sh_mem_settings(struct device_queue_manager *dqm,
|
||||
struct qcm_process_device *qpd);
|
||||
unsigned int get_queues_num(struct device_queue_manager *dqm);
|
||||
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
|
||||
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
|
||||
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
|
||||
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
|
||||
|
|
|
@ -62,7 +62,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
|
|||
max_proc_per_quantum = dev->max_proc_per_quantum;
|
||||
|
||||
if ((process_count > max_proc_per_quantum) ||
|
||||
compute_queue_count > get_queues_num(pm->dqm)) {
|
||||
compute_queue_count > get_cp_queues_num(pm->dqm)) {
|
||||
*over_subscription = true;
|
||||
pr_debug("Over subscribed runlist\n");
|
||||
}
|
||||
|
|
|
@ -266,7 +266,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
|
|||
if ((dev->dqm->sched_policy ==
|
||||
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
|
||||
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
|
||||
(dev->dqm->active_queue_count >= get_queues_num(dev->dqm)))) {
|
||||
(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
|
||||
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
|
||||
retval = -EPERM;
|
||||
goto err_create_queue;
|
||||
|
|
|
@ -1320,7 +1320,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
|
|||
dev->node_props.num_gws = (hws_gws_support &&
|
||||
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
|
||||
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
|
||||
dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
|
||||
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
|
||||
dev->node_props.unique_id = gpu->unique_id;
|
||||
|
||||
kfd_fill_mem_clk_max_info(dev);
|
||||
|
|
|
@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
|
|||
uint32_t num_queue_per_pipe;
|
||||
|
||||
/* Bit n == 1 means Queue n is available for KFD */
|
||||
DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
|
||||
DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
|
||||
|
||||
/* SDMA doorbell assignments (SOC15 and later chips only). Only
|
||||
* specific doorbells are routed to each SDMA engine. Others
|
||||
|
|
Loading…
Reference in New Issue