drm/amdgpu: implement more ib pools (v2)
We have three IB pools: normal, VM and direct. Jobs which schedule IBs
without going through the GPU scheduler should use the DIRECT pool. Jobs
which schedule direct VM update IBs should use the VM pool. All other
jobs should use the NORMAL pool.

v2: squash in coding style fix

Signed-off-by: xinhui pan <xinhui.pan@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
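To make the pool-selection policy concrete before reading the diff, here is a small standalone C sketch (not part of the patch). The enum mirrors the one added to amdgpu.h below; pick_ib_pool() and its two flags are hypothetical names invented for illustration, not driver functions.

#include <stdbool.h>
#include <stdio.h>

enum amdgpu_ib_pool_type {
        AMDGPU_IB_POOL_NORMAL = 0,      /* jobs that go through the GPU scheduler */
        AMDGPU_IB_POOL_VM,              /* direct VM update IBs */
        AMDGPU_IB_POOL_DIRECT,          /* IBs submitted without the scheduler */

        AMDGPU_IB_POOL_MAX
};

/* Hypothetical helper: restate the commit's policy as a decision. */
static enum amdgpu_ib_pool_type pick_ib_pool(bool bypasses_scheduler,
                                             bool is_vm_update)
{
        if (bypasses_scheduler && is_vm_update)
                return AMDGPU_IB_POOL_VM;       /* e.g. direct VM SDMA updates */
        if (bypasses_scheduler)
                return AMDGPU_IB_POOL_DIRECT;   /* e.g. ring tests, UVD/VCE bringup */
        return AMDGPU_IB_POOL_NORMAL;           /* e.g. userspace command submission */
}

int main(void)
{
        printf("ring test        -> pool %d\n", pick_ib_pool(true, false));
        printf("direct VM update -> pool %d\n", pick_ib_pool(true, true));
        printf("user CS job      -> pool %d\n", pick_ib_pool(false, false));
        return 0;
}

The call sites in the diff pass the pool explicitly in exactly this pattern: amdgpu_copy_buffer() picks DIRECT or NORMAL from its direct_submit flag, the VM SDMA paths pick VM or NORMAL from p->direct, and the ring tests and UVD/VCE/VCN/JPEG message helpers always use DIRECT.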
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -388,6 +388,13 @@ struct amdgpu_sa_bo {
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
+enum amdgpu_ib_pool_type {
+        AMDGPU_IB_POOL_NORMAL = 0,
+        AMDGPU_IB_POOL_VM,
+        AMDGPU_IB_POOL_DIRECT,
+
+        AMDGPU_IB_POOL_MAX
+};
 /*
  * IRQS.
  */

@@ -439,7 +446,9 @@ struct amdgpu_fpriv {
 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                  unsigned size, struct amdgpu_ib *ib);
+                  unsigned size,
+                  enum amdgpu_ib_pool_type pool,
+                  struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                     struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,

@@ -843,7 +852,7 @@ struct amdgpu_device {
         unsigned                        num_rings;
         struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
         bool                            ib_pool_ready;
-        struct amdgpu_sa_manager       ring_tmp_bo;
+        struct amdgpu_sa_manager       ring_tmp_bo[AMDGPU_IB_POOL_MAX];
 
         /* interrupts */
         struct amdgpu_irq               irq;

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -924,7 +924,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                 ring = to_amdgpu_ring(entity->rq->sched);
                 r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-                                  chunk_ib->ib_bytes : 0, ib);
+                                  chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_NORMAL, ib);
                 if (r) {
                         DRM_ERROR("Failed to get ib !\n");
                         return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -61,12 +61,14 @@
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                  unsigned size, struct amdgpu_ib *ib)
+                  unsigned size,
+                  enum amdgpu_ib_pool_type pool_type,
+                  struct amdgpu_ib *ib)
 {
         int r;
 
         if (size) {
-                r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+                r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type],
                                      &ib->sa_bo, size, 256);
                 if (r) {
                         dev_err(adev->dev, "failed to get a new IB (%d)\n", r);

@@ -280,19 +282,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-        int r;
+        int r, i;
+        unsigned size;
 
         if (adev->ib_pool_ready) {
                 return 0;
         }
-        r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
-                                      AMDGPU_IB_POOL_SIZE*64*1024,
-                                      AMDGPU_GPU_PAGE_SIZE,
-                                      AMDGPU_GEM_DOMAIN_GTT);
-        if (r) {
-                return r;
+        for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+                if (i == AMDGPU_IB_POOL_DIRECT)
+                        size = PAGE_SIZE * 2;
+                else
+                        size = AMDGPU_IB_POOL_SIZE*64*1024;
+                r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i],
+                                              size,
+                                              AMDGPU_GPU_PAGE_SIZE,
+                                              AMDGPU_GEM_DOMAIN_GTT);
+                if (r) {
+                        for (i--; i >= 0; i--)
+                                amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
+                        return r;
+                }
         }
 
         adev->ib_pool_ready = true;
 
         return 0;

@@ -308,8 +318,11 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
  */
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 {
+        int i;
+
         if (adev->ib_pool_ready) {
-                amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
+                for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+                        amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
                 adev->ib_pool_ready = false;
         }
 }

@@ -406,7 +419,12 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
         struct drm_device *dev = node->minor->dev;
         struct amdgpu_device *adev = dev->dev_private;
 
-        amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+        seq_printf(m, "-------------------- NORMAL -------------------- \n");
+        amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_NORMAL], m);
+        seq_printf(m, "---------------------- VM ---------------------- \n");
+        amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_VM], m);
+        seq_printf(m, "-------------------- DIRECT--------------------- \n");
+        amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_DIRECT], m);
 
         return 0;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -87,7 +87,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             struct amdgpu_job **job)
+                             enum amdgpu_ib_pool_type pool_type,
+                             struct amdgpu_job **job)
 {
         int r;
 

@@ -95,7 +96,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
         if (r)
                 return r;
 
-        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+        r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
         if (r)
                 kfree(*job);
 

drivers/gpu/drm/amd/amdgpu/amdgpu_job.h

@@ -38,6 +38,7 @@
 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
 
 struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
 
 struct amdgpu_job {
         struct drm_sched_job    base;

@@ -67,8 +68,7 @@ struct amdgpu_job {
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                      struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             struct amdgpu_job **job);
-
+                             enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,

drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c

@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
         const unsigned ib_size_dw = 16;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -2043,7 +2043,8 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
         num_bytes = num_pages * 8;
 
-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                     AMDGPU_IB_POOL_NORMAL, &job);
         if (r)
                 return r;
 

@@ -2102,7 +2103,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
         num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
+                        direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
         if (r)
                 return r;
 

@@ -2191,7 +2193,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
         /* for IB padding */
         num_dw += 64;
 
-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
         if (r)
                 return r;
 

drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                 goto err;
         }
 
-        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+        r = amdgpu_job_alloc_with_ib(adev, 64,
+                        direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
         if (r)
                 goto err;
 

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

@@ -524,7 +525,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
         struct dma_fence *f = NULL;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                        direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
         if (r)
                 return r;
 

drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

@@ -390,7 +390,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+        r = amdgpu_job_alloc_with_ib(adev, 64,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 goto err;
 

@@ -557,7 +558,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

@@ -610,7 +612,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c

@@ -64,7 +64,8 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
         unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
         int r;
 
-        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
+                        p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
         if (r)
                 return r;
 

@@ -223,7 +224,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
         ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
         ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
-        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
+                        p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
         if (r)
                 return r;
 

drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -679,7 +679,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err0;
 

drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c

@@ -557,7 +557,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         gpu_addr = adev->wb.gpu_addr + (index * 4);
         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 16, &ib);
+        r = amdgpu_ib_get(adev, NULL, 16,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err1;
 

drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c

@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
         WREG32(scratch, 0xCAFEDEAD);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err1;
 

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
         WREG32(scratch, 0xCAFEDEAD);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err1;
 

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         gpu_addr = adev->wb.gpu_addr + (index * 4);
         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 16, &ib);
+        r = amdgpu_ib_get(adev, NULL, 16,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err1;
 

@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
         /* allocate an indirect buffer to put the commands in */
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+        r = amdgpu_ib_get(adev, NULL, total_size,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r) {
                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                 return r;

drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@@ -1082,7 +1082,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         gpu_addr = adev->wb.gpu_addr + (index * 4);
         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 16, &ib);
+        r = amdgpu_ib_get(adev, NULL, 16,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err1;
 

@@ -4485,7 +4486,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
         /* allocate an indirect buffer to put the commands in */
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+        r = amdgpu_ib_get(adev, NULL, total_size,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r) {
                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                 return r;

drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

@@ -369,7 +369,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
          * translation. Avoid this by doing the invalidation from the SDMA
          * itself.
          */
-        r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+        r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job);
         if (r)
                 goto error_alloc;
 

drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -614,7 +614,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err0;
 

drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -886,7 +886,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err0;
 

drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

@@ -1539,7 +1539,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err0;
 

drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c

@@ -948,7 +948,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r) {
                 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                 goto err0;

drivers/gpu/drm/amd/amdgpu/si_dma.c

@@ -267,7 +267,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err0;
 

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c

@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
 

@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
         uint64_t addr;
         int i, r;
 
-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
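
One detail worth noting from the amdgpu_ib.c hunk above is the partial-failure unwind in amdgpu_ib_pool_init(): if initializing pool i fails, the loop walks back from i-1 to 0 and tears down only the managers that were successfully set up. A minimal standalone C sketch of the same idiom follows; init_pool()/fini_pool() and the forced failure at i == 2 are stand-ins for illustration, not driver code.

#include <stdio.h>

#define POOL_MAX 3

/* Stand-in init: pretend pool 2 fails so the unwind path runs. */
static int init_pool(int i)  { printf("init %d\n", i); return i == 2 ? -1 : 0; }
static void fini_pool(int i) { printf("fini %d\n", i); }

static int pools_init(void)
{
        int r, i;

        for (i = 0; i < POOL_MAX; i++) {
                r = init_pool(i);
                if (r) {
                        for (i--; i >= 0; i--)  /* unwind only what succeeded */
                                fini_pool(i);
                        return r;
                }
        }
        return 0;
}

int main(void)
{
        return pools_init() ? 1 : 0;
}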