drm/amdkfd: Using new gtt sa in amdkfd
This patch changes the calls throughout the amdkfd driver from the old kfd->kgd interface to the new GTT sub-allocator (gtt sa) inside amdkfd.

v2: change the new call in the sdma code that appeared because of the sdma feature

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alexey Skidanov <Alexey.skidanov@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
commit a86aa3ca5a
parent 73a1da0bb3
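The hunks below all follow the same pattern. As a quick reference, here is a minimal sketch of the conversion, assuming the kfd_gtt_sa_allocate()/kfd_gtt_sa_free() prototypes that the preceding patch in this series declares in kfd_priv.h; the example_alloc() wrapper itself is hypothetical and only illustrates the call shape, it is not code from this patch.

/*
 * Illustration only: old kfd->kgd call vs. new amdkfd GTT sub-allocator call.
 * Assumed prototypes (from kfd_priv.h, introduced earlier in the series):
 *   int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 *                           struct kfd_mem_obj **mem_obj);
 *   int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
 */
static int example_alloc(struct kfd_dev *dev, unsigned int size,
                         struct kfd_mem_obj **mem_obj)
{
        int err;

        /* Old style, routed through the kfd2kgd interface into the
         * underlying graphics driver:
         *
         *      err = kfd2kgd->allocate_mem(dev->kgd, size, PAGE_SIZE,
         *                              KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
         *                              (struct kgd_mem **) mem_obj);
         */

        /* New style: amdkfd's own sub-allocator. Alignment and memory pool
         * are handled internally, so only the device, the size and the
         * destination pointer are passed, and no cast is needed.
         */
        err = kfd_gtt_sa_allocate(dev, size, mem_obj);
        if (err != 0)
                return -ENOMEM;

        /* ... use (*mem_obj)->cpu_ptr and (*mem_obj)->gpu_addr ... */

        kfd_gtt_sa_free(dev, *mem_obj);
        return 0;
}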
@@ -519,11 +519,8 @@ static int init_pipelines(struct device_queue_manager *dqm,
 	 * because it contains no data when there are no active queues.
 	 */
 
-	err = kfd2kgd->allocate_mem(dqm->dev->kgd,
-				CIK_HPD_EOP_BYTES * pipes_num,
-				PAGE_SIZE,
-				KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-				(struct kgd_mem **) &dqm->pipeline_mem);
+	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
+					&dqm->pipeline_mem);
 
 	if (err) {
 		pr_err("kfd: error allocate vidmem num pipes: %d\n",
@@ -538,8 +535,7 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
 	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
 	if (mqd == NULL) {
-		kfd2kgd->free_mem(dqm->dev->kgd,
-				(struct kgd_mem *) dqm->pipeline_mem);
+		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 		return -ENOMEM;
 	}
 
@@ -614,8 +610,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
 	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
 		kfree(dqm->mqds[i]);
 	mutex_destroy(&dqm->lock);
-	kfd2kgd->free_mem(dqm->dev->kgd,
-			(struct kgd_mem *) dqm->pipeline_mem);
+	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 }
 
 static int start_nocpsch(struct device_queue_manager *dqm)
@@ -773,11 +768,8 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	pr_debug("kfd: allocating fence memory\n");
 
 	/* allocate fence memory on the gart */
-	retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
-					sizeof(*dqm->fence_addr),
-					32,
-					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-					(struct kgd_mem **) &dqm->fence_mem);
+	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
+					&dqm->fence_mem);
 
 	if (retval != 0)
 		goto fail_allocate_vidmem;
@@ -812,8 +804,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
 		pdd = qpd_to_pdd(node->qpd);
 		pdd->bound = false;
 	}
-	kfd2kgd->free_mem(dqm->dev->kgd,
-			(struct kgd_mem *) dqm->fence_mem);
+	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
 	pm_uninit(&dqm->packets);
 
 	return 0;

@@ -72,11 +72,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	if (prop.doorbell_ptr == NULL)
 		goto err_get_kernel_doorbell;
 
-	retval = kfd2kgd->allocate_mem(dev->kgd,
-					queue_size,
-					PAGE_SIZE,
-					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-					(struct kgd_mem **) &kq->pq);
+	retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
 
 	if (retval != 0)
 		goto err_pq_allocate_vidmem;
@@ -84,11 +80,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	kq->pq_kernel_addr = kq->pq->cpu_ptr;
 	kq->pq_gpu_addr = kq->pq->gpu_addr;
 
-	retval = kfd2kgd->allocate_mem(dev->kgd,
-					sizeof(*kq->rptr_kernel),
-					32,
-					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-					(struct kgd_mem **) &kq->rptr_mem);
+	retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
+					&kq->rptr_mem);
 
 	if (retval != 0)
 		goto err_rptr_allocate_vidmem;
@@ -96,11 +89,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
 	kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
 
-	retval = kfd2kgd->allocate_mem(dev->kgd,
-					sizeof(*kq->wptr_kernel),
-					32,
-					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-					(struct kgd_mem **) &kq->wptr_mem);
+	retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
+					&kq->wptr_mem);
 
 	if (retval != 0)
 		goto err_wptr_allocate_vidmem;
@@ -145,11 +135,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	} else {
 		/* allocate fence for DIQ */
 
-		retval = kfd2kgd->allocate_mem(dev->kgd,
-					sizeof(uint32_t),
-					32,
-					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-					(struct kgd_mem **) &kq->fence_mem_obj);
+		retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
+						&kq->fence_mem_obj);
 
 		if (retval != 0)
 			goto err_alloc_fence;
@@ -165,11 +152,11 @@ err_alloc_fence:
 err_init_mqd:
 	uninit_queue(kq->queue);
 err_init_queue:
-	kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+	kfd_gtt_sa_free(dev, kq->wptr_mem);
 err_wptr_allocate_vidmem:
-	kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+	kfd_gtt_sa_free(dev, kq->rptr_mem);
 err_rptr_allocate_vidmem:
-	kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+	kfd_gtt_sa_free(dev, kq->pq);
 err_pq_allocate_vidmem:
 	pr_err("kfd: error init pq\n");
 	kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
@@ -190,10 +177,12 @@ static void uninitialize(struct kernel_queue *kq)
 					QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
 					kq->queue->pipe,
 					kq->queue->queue);
+	else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+		kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
 
-	kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
-	kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
-	kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+	kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+	kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+	kfd_gtt_sa_free(kq->dev, kq->pq);
 	kfd_release_kernel_doorbell(kq->dev,
 					kq->queue->properties.doorbell_ptr);
 	uninit_queue(kq->queue);

@@ -52,11 +52,8 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 
 	pr_debug("kfd: In func %s\n", __func__);
 
-	retval = kfd2kgd->allocate_mem(mm->dev->kgd,
-				sizeof(struct cik_mqd),
-				256,
-				KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-				(struct kgd_mem **) mqd_mem_obj);
+	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+					mqd_mem_obj);
 
 	if (retval != 0)
 		return -ENOMEM;
@@ -121,11 +118,9 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
 
 	BUG_ON(!mm || !mqd || !mqd_mem_obj);
 
-	retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+	retval = kfd_gtt_sa_allocate(mm->dev,
 			sizeof(struct cik_sdma_rlc_registers),
-			256,
-			KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-			(struct kgd_mem **) mqd_mem_obj);
+			mqd_mem_obj);
 
 	if (retval != 0)
 		return -ENOMEM;
@@ -147,14 +142,14 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
 		struct kfd_mem_obj *mqd_mem_obj)
 {
 	BUG_ON(!mm || !mqd);
-	kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
 }
 
 static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
 		struct kfd_mem_obj *mqd_mem_obj)
 {
 	BUG_ON(!mm || !mqd);
-	kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
 }
 
 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
@@ -306,11 +301,8 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 
 	pr_debug("kfd: In func %s\n", __func__);
 
-	retval = kfd2kgd->allocate_mem(mm->dev->kgd,
-				sizeof(struct cik_mqd),
-				256,
-				KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-				(struct kgd_mem **) mqd_mem_obj);
+	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+					mqd_mem_obj);
 
 	if (retval != 0)
 		return -ENOMEM;

@@ -97,11 +97,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 
 	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
 
-	retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
-					*rl_buffer_size,
-					PAGE_SIZE,
-					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-					(struct kgd_mem **) &pm->ib_buffer_obj);
+	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
+					&pm->ib_buffer_obj);
 
 	if (retval != 0) {
 		pr_err("kfd: failed to allocate runlist IB\n");
@@ -557,8 +554,7 @@ void pm_release_ib(struct packet_manager *pm)
 
 	mutex_lock(&pm->lock);
 	if (pm->allocated) {
-		kfd2kgd->free_mem(pm->dqm->dev->kgd,
-				(struct kgd_mem *) pm->ib_buffer_obj);
+		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
 		pm->allocated = false;
 	}
 	mutex_unlock(&pm->lock);