Merge branch 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes for amdgpu from the last few days:
- Fix several copy-paste typos
- Resume-from-suspend fixes for VCE
- Fix the GPU scheduler warning in kfifo_out
- Re-enable GPUVM fault interrupts, which were inadvertently disabled
- GPUVM page table hang fix when paging

* 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: rename gmc_v8_0_init_compute_vmid
  drm/amdgpu: fix vce3 instance handling
  drm/amdgpu: remove ib test for the second VCE Ring
  drm/amdgpu: properly enable VM fault interrupts
  drm/amdgpu: fix warning in scheduler
  drm/amdgpu: fix buffer placement under memory pressure
  drm/amdgpu/cz: fix cz_dpm_update_low_memory_pstate logic
  drm/amdgpu: fix typo in dce11 watermark setup
  drm/amdgpu: fix typo in dce10 watermark setup
  drm/amdgpu: use top down allocation for non-CPU accessible vram
  drm/amdgpu: be explicit about cpu vram access for driver BOs (v2)
commit 73bf1b7be7
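Most of the per-file hunks below share one pattern: every amdgpu_bo_create()/amdgpu_gem_object_create() caller now states explicitly whether the CPU needs to reach the buffer, instead of passing 0 for the flags argument. A minimal sketch of the two cases (the variable names fw_bo and pt_bo are illustrative, not from the patch):

    /* CPU writes this BO (firmware upload, scratch page, framebuffer):
     * it must stay inside the CPU-visible VRAM window. */
    r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                         AMDGPU_GEM_DOMAIN_VRAM,
                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                         NULL, &fw_bo);

    /* Only the GPU touches this BO (e.g. VM page tables): it may live in
     * CPU-invisible VRAM, keeping the visible window free under pressure. */
    r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                         AMDGPU_GEM_DOMAIN_VRAM,
                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                         NULL, &pt_bo);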
@@ -354,7 +354,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
          * into account. We don't want to disallow buffer moves
          * completely.
          */
-        if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+        if ((lobj->allowed_domains & current_domain) != 0 &&
             (domain & current_domain) == 0 && /* will be moved */
             bytes_moved > bytes_moved_threshold) {
             /* don't move it */
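The old test skipped the move for any buffer not currently in the CPU domain once the byte budget was exceeded, even when its current placement was not in its allowed set at all; the fixed test only leaves a buffer where it is if that placement is actually acceptable. The corrected decision as a standalone sketch (the helper name and parameters are assumptions; only the three conditions come from the hunk):

    static bool amdgpu_skip_busy_move(u32 allowed_domains, u32 current_domain,
                                      u32 wanted_domain, u64 bytes_moved,
                                      u64 bytes_moved_threshold)
    {
        return (allowed_domains & current_domain) != 0 && /* current placement is allowed */
               (wanted_domain & current_domain) == 0 &&   /* validation would move it */
               bytes_moved > bytes_moved_threshold;       /* and the move budget is spent */
    }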
@@ -244,7 +244,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 
     if (adev->vram_scratch.robj == NULL) {
         r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-                             PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+                             PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                              NULL, &adev->vram_scratch.robj);
         if (r) {
             return r;
@@ -126,8 +126,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
     aligned_size = ALIGN(size, PAGE_SIZE);
     ret = amdgpu_gem_object_create(adev, aligned_size, 0,
                                    AMDGPU_GEM_DOMAIN_VRAM,
-                                   0, true,
-                                   &gobj);
+                                   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                   true, &gobj);
     if (ret) {
         printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
                aligned_size);
@@ -125,7 +125,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 
     if (adev->gart.robj == NULL) {
         r = amdgpu_bo_create(adev, adev->gart.table_size,
-                             PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+                             PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                              NULL, &adev->gart.robj);
         if (r) {
             return r;
@@ -656,7 +656,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 
     r = amdgpu_gem_object_create(adev, args->size, 0,
                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                 0, ttm_bo_type_device,
+                                 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                 ttm_bo_type_device,
                                  &gobj);
     if (r)
         return -ENOMEM;
@@ -127,7 +127,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
             placements[c].fpfn =
                 adev->mc.visible_vram_size >> PAGE_SHIFT;
             placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-                TTM_PL_FLAG_VRAM;
+                TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
         }
         placements[c].fpfn = 0;
         placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
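This placement describes VRAM above the CPU-visible window (fpfn starts at the first page past visible_vram_size) and is used for BOs that did not request CPU access; adding TTM_PL_FLAG_TOPDOWN makes TTM allocate such buffers from the top of VRAM downwards, so the contended CPU-visible range at the bottom fills up last. A sketch of how such a placement is composed (illustrative helper; the field names are TTM's ttm_place as used in the hunk):

    static void init_invisible_vram_place(struct ttm_place *place,
                                          struct amdgpu_device *adev)
    {
        place->fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; /* first allowed page */
        place->lpfn = 0;                                        /* 0 = no upper bound */
        place->flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                       TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
    }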
@@ -859,7 +859,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
     amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
     r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
-                         AMDGPU_GEM_DOMAIN_VRAM, 0,
+                         AMDGPU_GEM_DOMAIN_VRAM,
+                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                          NULL, &adev->stollen_vga_memory);
     if (r) {
         return r;
@@ -154,7 +154,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
     bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
               + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
     r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-                         AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+                         AMDGPU_GEM_DOMAIN_VRAM,
+                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                         NULL, &adev->uvd.vcpu_bo);
     if (r) {
         dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
         return r;
@@ -901,7 +903,9 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
     int r, i;
 
     r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-                         AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+                         AMDGPU_GEM_DOMAIN_VRAM,
+                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                         NULL, &bo);
     if (r)
         return r;
 
@@ -948,7 +952,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
     int r, i;
 
     r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-                         AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+                         AMDGPU_GEM_DOMAIN_VRAM,
+                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                         NULL, &bo);
     if (r)
         return r;
 
@@ -141,7 +141,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
     /* allocate firmware, stack and heap BO */
 
     r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-                         AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+                         AMDGPU_GEM_DOMAIN_VRAM,
+                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                         NULL, &adev->vce.vcpu_bo);
     if (r) {
         dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
         return r;
@@ -836,6 +838,10 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
     struct fence *fence = NULL;
     int r;
 
+    /* skip vce ring1 ib test for now, since it's not reliable */
+    if (ring == &ring->adev->vce.ring[1])
+        return 0;
+
     r = amdgpu_vce_get_create_msg(ring, 1, NULL);
     if (r) {
         DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
@@ -1099,7 +1099,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
         r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                              AMDGPU_GPU_PAGE_SIZE, true,
-                             AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+                             AMDGPU_GEM_DOMAIN_VRAM,
+                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+                             NULL, &pt);
         if (r)
             goto error_free;
 
@@ -1299,7 +1301,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
     vm->page_directory_fence = NULL;
 
     r = amdgpu_bo_create(adev, pd_size, align, true,
-                         AMDGPU_GEM_DOMAIN_VRAM, 0,
+                         AMDGPU_GEM_DOMAIN_VRAM,
+                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                          NULL, &vm->page_directory);
     if (r)
         return r;
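The page directory and page tables are written by the GPU through the VM update command stream, so the CPU never needs to map them; AMDGPU_GEM_CREATE_NO_CPU_ACCESS records that, letting the top-down placement above push them out of the CPU-visible window instead of fragmenting it. The flag choice, reduced to a sketch (hypothetical helper, not part of the patch):

    static u64 vram_bo_flags(bool cpu_writes_it)
    {
        return cpu_writes_it ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
                             : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
    }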
@@ -1596,9 +1596,9 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
 
     if (pi->sys_info.nb_dpm_enable) {
         if (ps->force_high)
-            cz_dpm_nbdpm_lm_pstate_enable(adev, true);
+            cz_dpm_nbdpm_lm_pstate_enable(adev, false);
         else
-            cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+            cz_dpm_nbdpm_lm_pstate_enable(adev, true);
     }
 
     return ret;
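The low-memory pstate should be active exactly when the power state does not force the high pstate; the old code had the sense inverted. The whole branch is equivalent to this one-liner (shown for clarity, not what the patch uses):

    cz_dpm_nbdpm_lm_pstate_enable(adev, !ps->force_high);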
@@ -1353,7 +1353,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
     tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
     WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
     tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-    tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+    tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
     tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
     WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
     /* restore original selection */
@@ -1329,7 +1329,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
     tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
     WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
     tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-    tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+    tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
     tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
     WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
     /* restore original selection */
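Both DCE blocks program two watermark sets, selected through URGENCY_WATERMARK_MASK before DPG_PIPE_URGENCY_CONTROL is written; the copy-paste typo stored latency_watermark_a into the set-B slot. For contrast, the matching set-A block presumably looks like this (selector value 1 for set A is an assumption; the hunk itself only shows the set-B path with selector 2):

    tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
    WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
    tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
    tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
    WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);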
@@ -762,7 +762,9 @@ int fiji_smu_init(struct amdgpu_device *adev)
 
     /* Allocate FW image data structure and header buffer */
     ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                           true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+                           true, AMDGPU_GEM_DOMAIN_VRAM,
+                           AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                           NULL, toc_buf);
     if (ret) {
         DRM_ERROR("Failed to allocate memory for TOC buffer\n");
         return -ENOMEM;
@@ -770,7 +772,9 @@ int fiji_smu_init(struct amdgpu_device *adev)
 
     /* Allocate buffer for SMU internal buffer */
     ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-                           true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+                           true, AMDGPU_GEM_DOMAIN_VRAM,
+                           AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                           NULL, smu_buf);
     if (ret) {
         DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
         return -ENOMEM;
@@ -3786,7 +3786,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
     /* save restore block */
     if (adev->gfx.rlc.save_restore_obj == NULL) {
         r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-                             AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+                             AMDGPU_GEM_DOMAIN_VRAM,
+                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                             NULL, &adev->gfx.rlc.save_restore_obj);
         if (r) {
             dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
             return r;
@@ -3827,7 +3829,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 
     if (adev->gfx.rlc.clear_state_obj == NULL) {
         r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-                             AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+                             AMDGPU_GEM_DOMAIN_VRAM,
+                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                             NULL, &adev->gfx.rlc.clear_state_obj);
         if (r) {
             dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
             gfx_v7_0_rlc_fini(adev);
@@ -3864,7 +3868,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
     if (adev->gfx.rlc.cp_table_size) {
         if (adev->gfx.rlc.cp_table_obj == NULL) {
             r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-                                 AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+                                 AMDGPU_GEM_DOMAIN_VRAM,
+                                 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                 NULL, &adev->gfx.rlc.cp_table_obj);
             if (r) {
                 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
                 gfx_v7_0_rlc_fini(adev);
@@ -2005,7 +2005,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_init_compute_vmid - gart enable
+ * gfx_v8_0_init_compute_vmid - gart enable
  *
  * @rdev: amdgpu_device pointer
  *
@@ -2015,7 +2015,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 #define DEFAULT_SH_MEM_BASES    (0x6000)
 #define FIRST_COMPUTE_VMID      (8)
 #define LAST_COMPUTE_VMID       (16)
-static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
     int i;
     uint32_t sh_mem_config;
@@ -2282,7 +2282,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
     vi_srbm_select(adev, 0, 0, 0, 0);
     mutex_unlock(&adev->srbm_mutex);
 
-    gmc_v8_0_init_compute_vmid(adev);
+    gfx_v8_0_init_compute_vmid(adev);
 
     mutex_lock(&adev->grbm_idx_mutex);
     /*
@@ -523,17 +523,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
     tmp = RREG32(mmVM_CONTEXT1_CNTL);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                         amdgpu_vm_block_size - 9);
@@ -852,6 +846,13 @@ static int gmc_v7_0_early_init(void *handle)
     return 0;
 }
 
+static int gmc_v7_0_late_init(void *handle)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+    return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
     int r;
@@ -976,6 +977,7 @@ static int gmc_v7_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
     gmc_v7_0_gart_disable(adev);
 
     return 0;
@@ -1301,7 +1303,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
     .early_init = gmc_v7_0_early_init,
-    .late_init = NULL,
+    .late_init = gmc_v7_0_late_init,
     .sw_init = gmc_v7_0_sw_init,
     .sw_fini = gmc_v7_0_sw_fini,
     .hw_init = gmc_v7_0_hw_init,
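Taken together, the gmc_v7_0 hunks change how VM fault interrupts are armed: instead of hard-wiring the *_ENABLE_INTERRUPT bits while bringing up the GART, the driver takes a reference on the vm_fault interrupt source at late_init and drops it at hw_fini. amdgpu_irq_get()/amdgpu_irq_put() refcount the source and invoke its ->set callback to flip the enable bits, so the hardware state follows the reference count across suspend/resume; the same change is applied to gmc_v8_0 below. A sketch of what such a ->set callback does (illustrative name and body; the real callback covers every protection-fault type):

    static int vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
    {
        u32 enable = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;
        u32 tmp = RREG32(mmVM_CONTEXT1_CNTL);

        /* one field shown for brevity */
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, enable);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
        return 0;
    }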
@@ -653,19 +653,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
     tmp = RREG32(mmVM_CONTEXT1_CNTL);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-    tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                         amdgpu_vm_block_size - 9);
@@ -852,6 +845,13 @@ static int gmc_v8_0_early_init(void *handle)
     return 0;
 }
 
+static int gmc_v8_0_late_init(void *handle)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+    return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v8_0_sw_init(void *handle)
 {
     int r;
@@ -978,6 +978,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
     gmc_v8_0_gart_disable(adev);
 
     return 0;
@@ -1288,7 +1289,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
     .early_init = gmc_v8_0_early_init,
-    .late_init = NULL,
+    .late_init = gmc_v8_0_late_init,
     .sw_init = gmc_v8_0_sw_init,
     .sw_fini = gmc_v8_0_sw_fini,
     .hw_init = gmc_v8_0_hw_init,
@@ -623,7 +623,9 @@ int iceland_smu_init(struct amdgpu_device *adev)
 
     /* Allocate FW image data structure and header buffer */
     ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                           true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+                           true, AMDGPU_GEM_DOMAIN_VRAM,
+                           AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                           NULL, toc_buf);
     if (ret) {
         DRM_ERROR("Failed to allocate memory for TOC buffer\n");
         return -ENOMEM;
@@ -761,7 +761,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
 
     /* Allocate FW image data structure and header buffer */
     ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                           true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+                           true, AMDGPU_GEM_DOMAIN_VRAM,
+                           AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                           NULL, toc_buf);
     if (ret) {
         DRM_ERROR("Failed to allocate memory for TOC buffer\n");
         return -ENOMEM;
@@ -769,7 +771,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
 
     /* Allocate buffer for SMU internal buffer */
    ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-                           true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+                           true, AMDGPU_GEM_DOMAIN_VRAM,
+                           AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                           NULL, smu_buf);
     if (ret) {
         DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
         return -ENOMEM;
@@ -32,8 +32,8 @@
 #include "vid.h"
 #include "vce/vce_3_0_d.h"
 #include "vce/vce_3_0_sh_mask.h"
-#include "oss/oss_2_0_d.h"
-#include "oss/oss_2_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
@@ -426,17 +426,41 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 static bool vce_v3_0_is_idle(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    u32 mask = 0;
+    int idx;
 
-    return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+    for (idx = 0; idx < 2; ++idx) {
+        if (adev->vce.harvest_config & (1 << idx))
+            continue;
+
+        if (idx == 0)
+            mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+        else
+            mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+    }
+
+    return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
 static int vce_v3_0_wait_for_idle(void *handle)
 {
     unsigned i;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    u32 mask = 0;
+    int idx;
+
+    for (idx = 0; idx < 2; ++idx) {
+        if (adev->vce.harvest_config & (1 << idx))
+            continue;
+
+        if (idx == 0)
+            mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+        else
+            mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+    }
 
     for (i = 0; i < adev->usec_timeout; i++) {
-        if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+        if (!(RREG32(mmSRBM_STATUS2) & mask))
             return 0;
     }
     return -ETIMEDOUT;
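On VCE 3.0 parts one of the two instances can be fused off (recorded in adev->vce.harvest_config), so checking the global SRBM_STATUS2__VCE_BUSY_MASK could report a harvested instance as busy forever; both functions now build a busy mask that covers only the instances that actually exist. The repeated loop could be factored into a helper like this (hypothetical name, not part of the patch):

    static u32 vce_v3_0_busy_mask(struct amdgpu_device *adev)
    {
        u32 mask = 0;
        int idx;

        for (idx = 0; idx < 2; ++idx) {
            if (adev->vce.harvest_config & (1 << idx))
                continue; /* instance is fused off */
            mask |= idx ? SRBM_STATUS2__VCE1_BUSY_MASK
                        : SRBM_STATUS2__VCE0_BUSY_MASK;
        }
        return mask;
    }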
@@ -445,9 +469,21 @@ static int vce_v3_0_wait_for_idle(void *handle)
 static int vce_v3_0_soft_reset(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    u32 mask = 0;
+    int idx;
 
-    WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-             ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+    for (idx = 0; idx < 2; ++idx) {
+        if (adev->vce.harvest_config & (1 << idx))
+            continue;
+
+        if (idx == 0)
+            mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
+        else
+            mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+    }
+    WREG32_P(mmSRBM_SOFT_RESET, mask,
+             ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
+               SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
     mdelay(5);
 
     return vce_v3_0_start(adev);
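WREG32_P is a read-modify-write helper: bits inside `mask` are preserved from the current register value and bits outside it come from `val`, so the write above asserts soft reset only for present VCE instances, clears it for fused-off ones, and leaves every other SRBM bit untouched. Its definition, paraphrased from memory of amdgpu.h of this era (verify against the tree):

    #define WREG32_P(reg, val, mask)                \
        do {                                        \
            uint32_t tmp_ = RREG32(reg);            \
            tmp_ &= (mask);                         \
            tmp_ |= ((val) & ~(mask));              \
            WREG32(reg, tmp_);                      \
        } while (0)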
@@ -332,7 +332,7 @@ static int amd_sched_main(void *param)
 {
     struct sched_param sparam = {.sched_priority = 1};
     struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
-    int r;
+    int r, count;
 
     sched_setscheduler(current, SCHED_FIFO, &sparam);
 
@@ -361,7 +361,8 @@ static int amd_sched_main(void *param)
             fence_put(fence);
         }
 
-        kfifo_out(&entity->job_queue, &job, sizeof(job));
+        count = kfifo_out(&entity->job_queue, &job, sizeof(job));
+        WARN_ON(count != sizeof(job));
         wake_up(&sched->job_scheduled);
     }
     return 0;
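kfifo_out() is annotated __must_check and returns how much it actually copied, which is what triggered the scheduler build warning; capturing the count and WARN_ON-ing a short read both silences the warning and catches an unexpectedly empty job queue. A self-contained usage sketch (hypothetical fifo; kfifo_alloc()/kfifo_in() setup assumed):

    #include <linux/kfifo.h>
    #include <linux/bug.h>

    static struct kfifo job_fifo; /* byte fifo, set up elsewhere with kfifo_alloc() */

    static void pop_one(void)
    {
        void *job;
        unsigned int copied;

        copied = kfifo_out(&job_fifo, &job, sizeof(job));
        WARN_ON(copied != sizeof(job)); /* short read: fifo was empty */
    }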