drm/radeon: use gart memory for DMA ring tests
Avoids HDP cache flush issues when using vram which can
cause ring test failures on certain boards.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: Alexander Fyodorov <halcy@yandex.ru>
Cc: stable@vger.kernel.org
parent 4910403836
commit adfed2b058
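For context (not part of the patch itself): the old tests seeded a dword in the VRAM scratch page and polled it back through the CPU's PCIe mapping of VRAM, a path that goes through the HDP cache and can return stale data unless that cache is flushed. The writeback page used here instead lives in GART-mapped system memory, so the CPU poll is an ordinary RAM read. A condensed before/after sketch using the driver symbols from the diff below (not the literal patch text):

	/* Old path: CPU reads a VRAM scratch dword back through the PCIe BAR;
	 * the HDP cache sits in this path and may hand back stale data. */
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	writel(0xCAFEDEAD, ptr);			/* seed the location */
	/* ... DMA engine writes 0xDEADBEEF to rdev->vram_scratch.gpu_addr ... */
	tmp = readl(ptr);				/* may miss the update */

	/* New path: CPU polls a slot of the GART-backed writeback page in
	 * system memory; the HDP cache is not involved. */
	gpu_addr = rdev->wb.gpu_addr + index;		/* GPU address of the slot */
	rdev->wb.wb[index/4] = cpu_to_le32(0xCAFEDEAD);	/* seed the slot */
	/* ... DMA engine writes 0xDEADBEEF to gpu_addr ... */
	tmp = le32_to_cpu(rdev->wb.wb[index/4]);	/* plain memory read */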
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -618,16 +618,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 {
 	unsigned i;
 	int r;
-	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	unsigned index;
 	u32 tmp;
+	u64 gpu_addr;
 
-	if (!ptr) {
-		DRM_ERROR("invalid vram scratch pointer\n");
-		return -EINVAL;
-	}
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+	gpu_addr = rdev->wb.gpu_addr + index;
 
 	tmp = 0xCAFEDEAD;
-	writel(tmp, ptr);
+	rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
 	r = radeon_ring_lock(rdev, ring, 5);
 	if (r) {
@@ -635,14 +638,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 		return r;
 	}
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
-	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
+	radeon_ring_write(ring, lower_32_bits(gpu_addr));
+	radeon_ring_write(ring, upper_32_bits(gpu_addr));
 	radeon_ring_write(ring, 1); /* number of DWs to follow */
 	radeon_ring_write(ring, 0xDEADBEEF);
 	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = readl(ptr);
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
 		if (tmp == 0xDEADBEEF)
 			break;
 		DRM_UDELAY(1);
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -241,16 +241,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
 {
 	unsigned i;
 	int r;
-	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	unsigned index;
 	u32 tmp;
+	u64 gpu_addr;
 
-	if (!ptr) {
-		DRM_ERROR("invalid vram scratch pointer\n");
-		return -EINVAL;
-	}
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+	gpu_addr = rdev->wb.gpu_addr + index;
 
 	tmp = 0xCAFEDEAD;
-	writel(tmp, ptr);
+	rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
 	r = radeon_ring_lock(rdev, ring, 4);
 	if (r) {
@@ -258,13 +261,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
 		return r;
 	}
 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
-	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+	radeon_ring_write(ring, lower_32_bits(gpu_addr));
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
 	radeon_ring_write(ring, 0xDEADBEEF);
 	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = readl(ptr);
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
 		if (tmp == 0xDEADBEEF)
 			break;
 		DRM_UDELAY(1);
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1132,6 +1132,8 @@ struct radeon_wb {
 #define R600_WB_EVENT_OFFSET     3072
 #define CIK_WB_CP1_WPTR_OFFSET     3328
 #define CIK_WB_CP2_WPTR_OFFSET     3584
+#define R600_WB_DMA_RING_TEST_OFFSET 3588
+#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
 
 /**
  * struct radeon_pm - power management datas
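Side note (not part of the patch): the *_OFFSET values are byte offsets into the writeback buffer, while rdev->wb.wb[] is a u32 array, which is why the ring tests above index it with index/4. Illustrative arithmetic (dma0_slot/dma1_slot are made-up names, not driver symbols):

	/* byte offset in the writeback buffer -> dword slot in rdev->wb.wb[] */
	u32 dma0_slot = R600_WB_DMA_RING_TEST_OFFSET / 4;	/* 3588 / 4 = 897 */
	u32 dma1_slot = CAYMAN_WB_DMA1_RING_TEST_OFFSET / 4;	/* 3592 / 4 = 898 */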