drm/amdgpu/virt: use kiq to access registers (v2)

For virtualization, the driver must use the KIQ to access
registers when it is outside of GPU full-access mode.
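
For context, "full-access mode" is what amdgpu_sriov_runtime() tests for.
It is an SR-IOV capability check along these lines (a sketch based on the
amdgpu_virt.h of this period; the exact flag names are an assumption to
verify against the tree):

/* Sketch, not part of this patch: "runtime" means the VF is an SR-IOV
 * guest that does not currently hold exclusive (full) access to the GPU,
 * so direct MMIO is unsafe and register accesses must go through the KIQ.
 */
#define amdgpu_sriov_vf(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_AMD_VF)
#define amdgpu_sriov_runtime(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)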

v2: agd: rebase

Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -94,6 +94,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
 	uint32_t ret;
 
+	if (amdgpu_sriov_runtime(adev)) {
+		BUG_ON(in_interrupt());
+		return amdgpu_virt_kiq_rreg(adev, reg);
+	}
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -113,6 +118,11 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
+	if (amdgpu_sriov_runtime(adev)) {
+		BUG_ON(in_interrupt());
+		return amdgpu_virt_kiq_wreg(adev, reg, v);
+	}
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
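
Note: the BUG_ON(in_interrupt()) guards exist because the KIQ path can
sleep: it takes adev->virt.lock and blocks in dma_fence_wait(). Any
register access from atomic context is therefore a bug under SR-IOV
runtime. A hypothetical offender, for illustration only (the handler and
register offset are made up):

/* Hypothetical, not from this patch: RREG32() now reaches
 * amdgpu_mm_rreg() -> amdgpu_virt_kiq_rreg(), which takes a mutex and
 * waits on a fence -- illegal in IRQ context, hence the BUG_ON above.
 */
static irqreturn_t bad_irq_handler(int irq, void *data)
{
	struct amdgpu_device *adev = data;
	u32 status = RREG32(0x1 /* made-up register offset */);

	return status ? IRQ_HANDLED : IRQ_NONE;
}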

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@@ -91,3 +91,61 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	ttm_eu_backoff_reservation(&ticket, &list);
 	return 0;
 }
+
+void amdgpu_virt_init_setting(struct amdgpu_device *adev)
+{
+	mutex_init(&adev->virt.lock);
+}
+
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	signed long r;
+	uint32_t val;
+	struct dma_fence *f;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	mutex_lock(&adev->virt.lock);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_hdp_flush(ring);
+	amdgpu_ring_emit_rreg(ring, reg);
+	amdgpu_ring_emit_hdp_invalidate(ring);
+	amdgpu_fence_emit(ring, &f);
+	amdgpu_ring_commit(ring);
+	mutex_unlock(&adev->virt.lock);
+
+	r = dma_fence_wait(f, false);
+	if (r)
+		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+	dma_fence_put(f);
+
+	val = adev->wb.wb[adev->virt.reg_val_offs];
+
+	return val;
+}
+
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	signed long r;
+	struct dma_fence *f;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_wreg);
+
+	mutex_lock(&adev->virt.lock);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_hdp_flush(ring);
+	amdgpu_ring_emit_wreg(ring, reg, v);
+	amdgpu_ring_emit_hdp_invalidate(ring);
+	amdgpu_fence_emit(ring, &f);
+	amdgpu_ring_commit(ring);
+	mutex_unlock(&adev->virt.lock);
+
+	r = dma_fence_wait(f, false);
+	if (r)
+		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+	dma_fence_put(f);
+}
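
Note: amdgpu_ring_emit_rreg() relies on the ASIC backend to copy the
register value into the writeback slot indexed by adev->virt.reg_val_offs;
the fence wait guarantees that copy has landed before the CPU reads
adev->wb.wb[]. On VI parts the backend looks roughly like this (a sketch
modeled on the gfx v8 KIQ implementation from the same patch series; the
exact packet encoding is an assumption to check against gfx_v8_0.c):

/* Sketch of a KIQ emit_rreg backend: COPY_DATA from the register file
 * into the writeback slot reserved at adev->virt.reg_val_offs. */
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, u32 reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |		/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

The mutex around the emit/commit sequence serializes KIQ submissions,
since the KIQ ring is driven directly here rather than through the GPU
scheduler.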

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

@@ -36,6 +36,7 @@ struct amdgpu_virt {
 	struct amdgpu_bo		*csa_obj;
 	uint64_t			csa_vmid0_addr;
 	uint32_t			reg_val_offs;
+	struct mutex			lock;
 };
 
 #define AMDGPU_CSA_SIZE    (8 * 1024)
@@ -68,5 +69,8 @@ static inline bool is_virtual_machine(void)
 struct amdgpu_vm;
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_virt_init_setting(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 
 #endif

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c

@@ -921,6 +921,9 @@ static int vi_common_early_init(void *handle)
 	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
 		smc_enabled = true;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_init_setting(adev);
+
 	adev->rev_id = vi_get_rev_id(adev);
 	adev->external_rev_id = 0xFF;
 	switch (adev->asic_type) {