drm/radeon: separate DMA code

Similar to separating the UVD code, just put the DMA functions into
separate files.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

parent e409b12862
commit 2483b4ea98
@@ -82,6 +82,15 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
	trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
	ci_dpm.o

# add async DMA block
radeon-y += \
	r600_dma.o \
	rv770_dma.o \
	evergreen_dma.o \
	ni_dma.o \
	si_dma.o \
	cik_sdma.o \

# add UVD block
radeon-y += \
	radeon_uvd.o \

@@ -64,6 +64,14 @@ extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
				 struct radeon_ib *ib,
				 uint64_t pe,
				 uint64_t addr, unsigned count,
				 uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
@ -3987,579 +3995,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
* DMA engines. These engines are used for compute
* and gfx. There are two DMA engines (SDMA0, SDMA1)
* and each one supports 1 ring buffer used for gfx
* and 2 queues used for compute.
*
* The programming model is very similar to the CP
* (ring buffer, IBs, etc.), but sDMA has its own
* packet format that is different from the PM4 format
* used by the CP. sDMA supports copying data, writing
* embedded data, solid fills, and a number of other
* things. It also has support for tiling/detiling of
* buffers.
*/

/**
|
||||
* cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (CIK).
|
||||
*/
|
||||
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 5;
|
||||
while ((next_rptr & 7) != 4)
|
||||
next_rptr++;
|
||||
next_rptr += 4;
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, 1); /* number of DWs to follow */
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* IB packet must end on an 8 DW boundary */
|
||||
while ((ring->wptr & 7) != 4)
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
|
||||
radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
|
||||
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, ib->length_dw);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_fence_ring_emit - emit a fence on the DMA ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Add a DMA fence packet to the ring to write
|
||||
* the fence seq number and DMA trap packet to generate
|
||||
* an interrupt if needed (CIK).
|
||||
*/
|
||||
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
||||
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
||||
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
|
||||
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
|
||||
u32 ref_and_mask;
|
||||
|
||||
if (fence->ring == R600_RING_TYPE_DMA_INDEX)
|
||||
ref_and_mask = SDMA0;
|
||||
else
|
||||
ref_and_mask = SDMA1;
|
||||
|
||||
/* write the fence */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
|
||||
radeon_ring_write(ring, addr & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, fence->seq);
|
||||
/* generate an interrupt */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
|
||||
/* flush HDP */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
|
||||
radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
|
||||
radeon_ring_write(ring, ref_and_mask); /* MASK */
|
||||
radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
* @semaphore: radeon semaphore object
|
||||
* @emit_wait: wait or signal semaphore
|
||||
*
|
||||
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (CIK).
|
||||
*/
|
||||
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring,
|
||||
struct radeon_semaphore *semaphore,
|
||||
bool emit_wait)
|
||||
{
|
||||
u64 addr = semaphore->gpu_addr;
|
||||
u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
|
||||
radeon_ring_write(ring, addr & 0xfffffff8);
|
||||
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_gfx_stop - stop the gfx async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the gfx async dma ring buffers (CIK).
|
||||
*/
|
||||
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
|
||||
{
|
||||
u32 rb_cntl, reg_offset;
|
||||
int i;
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0)
|
||||
reg_offset = SDMA0_REGISTER_OFFSET;
|
||||
else
|
||||
reg_offset = SDMA1_REGISTER_OFFSET;
|
||||
rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
|
||||
rb_cntl &= ~SDMA_RB_ENABLE;
|
||||
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
|
||||
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_rlc_stop - stop the compute async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the compute async dma queues (CIK).
|
||||
*/
|
||||
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX todo */
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_enable - stop the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @enable: enable/disable the DMA MEs.
|
||||
*
|
||||
* Halt or unhalt the async dma engines (CIK).
|
||||
*/
|
||||
static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
|
||||
{
|
||||
u32 me_cntl, reg_offset;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0)
|
||||
reg_offset = SDMA0_REGISTER_OFFSET;
|
||||
else
|
||||
reg_offset = SDMA1_REGISTER_OFFSET;
|
||||
me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
|
||||
if (enable)
|
||||
me_cntl &= ~SDMA_HALT;
|
||||
else
|
||||
me_cntl |= SDMA_HALT;
|
||||
WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_gfx_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the gfx DMA ring buffers and enable them (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring;
|
||||
u32 rb_cntl, ib_cntl;
|
||||
u32 rb_bufsz;
|
||||
u32 reg_offset, wb_offset;
|
||||
int i, r;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0) {
|
||||
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
||||
reg_offset = SDMA0_REGISTER_OFFSET;
|
||||
wb_offset = R600_WB_DMA_RPTR_OFFSET;
|
||||
} else {
|
||||
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
|
||||
reg_offset = SDMA1_REGISTER_OFFSET;
|
||||
wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
|
||||
}
|
||||
|
||||
WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
|
||||
WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
rb_bufsz = drm_order(ring->ring_size / 4);
|
||||
rb_cntl = rb_bufsz << 1;
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
|
||||
|
||||
/* Initialize the ring buffer's read and write pointers */
|
||||
WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
|
||||
WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
|
||||
|
||||
/* set the wb address whether it's enabled or not */
|
||||
WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
|
||||
upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
|
||||
WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
|
||||
((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
|
||||
|
||||
WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
|
||||
WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
|
||||
|
||||
ring->wptr = 0;
|
||||
WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
|
||||
|
||||
ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
|
||||
|
||||
/* enable DMA RB */
|
||||
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
|
||||
|
||||
ib_cntl = SDMA_IB_ENABLE;
|
||||
#ifdef __BIG_ENDIAN
|
||||
ib_cntl |= SDMA_IB_SWAP_ENABLE;
|
||||
#endif
|
||||
/* enable DMA IBs */
|
||||
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
|
||||
|
||||
ring->ready = true;
|
||||
|
||||
r = radeon_ring_test(rdev, ring->idx, ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_rlc_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the compute DMA queues and enable them (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX todo */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_load_microcode - load the sDMA ME ucode
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Loads the sDMA0/1 ucode.
|
||||
* Returns 0 for success, -EINVAL if the ucode is not available.
|
||||
*/
|
||||
static int cik_sdma_load_microcode(struct radeon_device *rdev)
|
||||
{
|
||||
const __be32 *fw_data;
|
||||
int i;
|
||||
|
||||
if (!rdev->sdma_fw)
|
||||
return -EINVAL;
|
||||
|
||||
/* stop the gfx rings and rlc compute queues */
|
||||
cik_sdma_gfx_stop(rdev);
|
||||
cik_sdma_rlc_stop(rdev);
|
||||
|
||||
/* halt the MEs */
|
||||
cik_sdma_enable(rdev, false);
|
||||
|
||||
/* sdma0 */
|
||||
fw_data = (const __be32 *)rdev->sdma_fw->data;
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
|
||||
for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
|
||||
|
||||
/* sdma1 */
|
||||
fw_data = (const __be32 *)rdev->sdma_fw->data;
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
|
||||
for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
|
||||
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the DMA engines and enable them (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
static int cik_sdma_resume(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* Reset dma */
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
|
||||
r = cik_sdma_load_microcode(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* unhalt the MEs */
|
||||
cik_sdma_enable(rdev, true);
|
||||
|
||||
/* start the gfx rings and rlc compute queues */
|
||||
r = cik_sdma_gfx_resume(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
r = cik_sdma_rlc_resume(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_fini - tear down the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engines and free the rings (CIK).
|
||||
*/
|
||||
static void cik_sdma_fini(struct radeon_device *rdev)
|
||||
{
|
||||
/* stop the gfx rings and rlc compute queues */
|
||||
cik_sdma_gfx_stop(rdev);
|
||||
cik_sdma_rlc_stop(rdev);
|
||||
/* halt the MEs */
|
||||
cik_sdma_enable(rdev, false);
|
||||
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
|
||||
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
|
||||
/* XXX - compute dma queue tear down */
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (CIK).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int cik_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_bytes, cur_size_in_bytes;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
|
||||
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_bytes = size_in_bytes;
|
||||
if (cur_size_in_bytes > 0x1fffff)
|
||||
cur_size_in_bytes = 0x1fffff;
|
||||
size_in_bytes -= cur_size_in_bytes;
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
|
||||
radeon_ring_write(ring, cur_size_in_bytes);
|
||||
radeon_ring_write(ring, 0); /* src/dst endian swap */
|
||||
radeon_ring_write(ring, src_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
|
||||
src_offset += cur_size_in_bytes;
|
||||
dst_offset += cur_size_in_bytes;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_ring_test - simple async dma engine test
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test the DMA engine by using it to write a
* value to memory (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int cik_sdma_ring_test(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ring_lock(rdev, ring, 4);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
|
||||
return r;
|
||||
}
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
|
||||
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, 1); /* number of DWs to follow */
|
||||
radeon_ring_write(ring, 0xDEADBEEF);
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
|
||||
ring->idx, tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_ib_test - test an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test a simple IB in the DMA ring (CIK).
|
||||
* Returns 0 on success, error on failure.
|
||||
*/
|
||||
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
struct radeon_ib ib;
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp = 0;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
|
||||
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
|
||||
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
|
||||
ib.ptr[3] = 1;
|
||||
ib.ptr[4] = 0xDEADBEEF;
|
||||
ib.length_dw = 5;
|
||||
|
||||
r = radeon_ib_schedule(rdev, &ib, NULL);
|
||||
if (r) {
|
||||
radeon_ib_free(rdev, &ib);
|
||||
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_fence_wait(ib.fence, false);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
radeon_ib_free(rdev, &ib);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
static void cik_print_gpu_status_regs(struct radeon_device *rdev)
|
||||
{
|
||||
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
|
||||
|
@ -4609,7 +4044,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
|
|||
* mask to be used by cik_gpu_soft_reset().
|
||||
* Returns a mask of the blocks to be reset.
|
||||
*/
|
||||
static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
{
|
||||
u32 reset_mask = 0;
|
||||
u32 tmp;
|
||||
|
@ -4860,34 +4295,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
|||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up (CIK).
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = cik_gpu_check_soft_reset(rdev);
|
||||
u32 mask;
|
||||
|
||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||
mask = RADEON_RESET_DMA;
|
||||
else
|
||||
mask = RADEON_RESET_DMA1;
|
||||
|
||||
if (!(reset_mask & mask)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/* MC */
|
||||
/**
|
||||
* cik_mc_program - program the GPU memory controller
|
||||
|
@ -5424,133 +4831,10 @@ void cik_vm_set_page(struct radeon_device *rdev,
|
|||
}
|
||||
} else {
|
||||
/* DMA */
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
/* for non-physically contiguous pages (system) */
|
||||
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
|
||||
ib->ptr[ib->length_dw++] = pe;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
|
||||
ib->ptr[ib->length_dw++] = ndw;
|
||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
value = radeon_vm_map_gart(rdev, addr);
|
||||
value &= 0xFFFFFFFFFFFFF000ULL;
|
||||
} else if (flags & RADEON_VM_PAGE_VALID) {
|
||||
value = addr;
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
addr += incr;
|
||||
value |= r600_flags;
|
||||
ib->ptr[ib->length_dw++] = value;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
while (count) {
|
||||
ndw = count;
|
||||
if (ndw > 0x7FFFF)
|
||||
ndw = 0x7FFFF;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_VALID)
|
||||
value = addr;
|
||||
else
|
||||
value = 0;
|
||||
/* for physically contiguous pages (vram) */
|
||||
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
|
||||
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
|
||||
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = value; /* value */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = ndw; /* number of entries */
|
||||
pe += ndw * 8;
|
||||
addr += ndw * incr;
|
||||
count -= ndw;
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
|
||||
cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_dma_vm_flush - cik vm flush using sDMA
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Update the page table base and flush the VM TLB
|
||||
* using sDMA (CIK).
|
||||
*/
|
||||
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ridx];
|
||||
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
|
||||
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
|
||||
u32 ref_and_mask;
|
||||
|
||||
if (vm == NULL)
|
||||
return;
|
||||
|
||||
if (ridx == R600_RING_TYPE_DMA_INDEX)
|
||||
ref_and_mask = SDMA0;
|
||||
else
|
||||
ref_and_mask = SDMA1;
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
if (vm->id < 8) {
|
||||
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
|
||||
} else {
|
||||
radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
|
||||
}
|
||||
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
|
||||
|
||||
/* update SH_MEM_* regs */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
|
||||
radeon_ring_write(ring, VMID(vm->id));
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_BASES >> 2);
|
||||
radeon_ring_write(ring, 0);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
|
||||
radeon_ring_write(ring, 0);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
|
||||
radeon_ring_write(ring, 1);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
|
||||
radeon_ring_write(ring, 0);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
|
||||
radeon_ring_write(ring, VMID(0));
|
||||
|
||||
/* flush HDP */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
|
||||
radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
|
||||
radeon_ring_write(ring, ref_and_mask); /* MASK */
|
||||
radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
|
||||
|
||||
/* flush TLB */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
|
||||
radeon_ring_write(ring, 1 << vm->id);
|
||||
}
|
||||
|
||||
/*
|
||||
* RLC
|
||||
* The RLC is a multi-purpose microengine that handles a
|
||||
|
|
|
@@ -0,0 +1,785 @@
/*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);

/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
* DMA engines. These engines are used for compute
* and gfx. There are two DMA engines (SDMA0, SDMA1)
* and each one supports 1 ring buffer used for gfx
* and 2 queues used for compute.
*
* The programming model is very similar to the CP
* (ring buffer, IBs, etc.), but sDMA has its own
* packet format that is different from the PM4 format
* used by the CP. sDMA supports copying data, writing
* embedded data, solid fills, and a number of other
* things. It also has support for tiling/detiling of
* buffers.
*/

/**
|
||||
* cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (CIK).
|
||||
*/
|
||||
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 5;
|
||||
while ((next_rptr & 7) != 4)
|
||||
next_rptr++;
|
||||
next_rptr += 4;
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, 1); /* number of DWs to follow */
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* IB packet must end on an 8 DW boundary */
|
||||
while ((ring->wptr & 7) != 4)
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
|
||||
radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
|
||||
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, ib->length_dw);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_fence_ring_emit - emit a fence on the DMA ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Add a DMA fence packet to the ring to write
|
||||
* the fence seq number and DMA trap packet to generate
|
||||
* an interrupt if needed (CIK).
|
||||
*/
|
||||
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
||||
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
||||
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
|
||||
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
|
||||
u32 ref_and_mask;
|
||||
|
||||
if (fence->ring == R600_RING_TYPE_DMA_INDEX)
|
||||
ref_and_mask = SDMA0;
|
||||
else
|
||||
ref_and_mask = SDMA1;
|
||||
|
||||
/* write the fence */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
|
||||
radeon_ring_write(ring, addr & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, fence->seq);
|
||||
/* generate an interrupt */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
|
||||
/* flush HDP */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
|
||||
radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
|
||||
radeon_ring_write(ring, ref_and_mask); /* MASK */
|
||||
radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
* @semaphore: radeon semaphore object
|
||||
* @emit_wait: wait or signal semaphore
|
||||
*
|
||||
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (CIK).
|
||||
*/
|
||||
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring,
|
||||
struct radeon_semaphore *semaphore,
|
||||
bool emit_wait)
|
||||
{
|
||||
u64 addr = semaphore->gpu_addr;
|
||||
u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
|
||||
radeon_ring_write(ring, addr & 0xfffffff8);
|
||||
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_gfx_stop - stop the gfx async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the gfx async dma ring buffers (CIK).
|
||||
*/
|
||||
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
|
||||
{
|
||||
u32 rb_cntl, reg_offset;
|
||||
int i;
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0)
|
||||
reg_offset = SDMA0_REGISTER_OFFSET;
|
||||
else
|
||||
reg_offset = SDMA1_REGISTER_OFFSET;
|
||||
rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
|
||||
rb_cntl &= ~SDMA_RB_ENABLE;
|
||||
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
|
||||
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_rlc_stop - stop the compute async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the compute async dma queues (CIK).
|
||||
*/
|
||||
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX todo */
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_enable - stop the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @enable: enable/disable the DMA MEs.
|
||||
*
|
||||
* Halt or unhalt the async dma engines (CIK).
|
||||
*/
|
||||
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
|
||||
{
|
||||
u32 me_cntl, reg_offset;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0)
|
||||
reg_offset = SDMA0_REGISTER_OFFSET;
|
||||
else
|
||||
reg_offset = SDMA1_REGISTER_OFFSET;
|
||||
me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
|
||||
if (enable)
|
||||
me_cntl &= ~SDMA_HALT;
|
||||
else
|
||||
me_cntl |= SDMA_HALT;
|
||||
WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_gfx_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the gfx DMA ring buffers and enable them (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring;
|
||||
u32 rb_cntl, ib_cntl;
|
||||
u32 rb_bufsz;
|
||||
u32 reg_offset, wb_offset;
|
||||
int i, r;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0) {
|
||||
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
||||
reg_offset = SDMA0_REGISTER_OFFSET;
|
||||
wb_offset = R600_WB_DMA_RPTR_OFFSET;
|
||||
} else {
|
||||
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
|
||||
reg_offset = SDMA1_REGISTER_OFFSET;
|
||||
wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
|
||||
}
|
||||
|
||||
WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
|
||||
WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
rb_bufsz = drm_order(ring->ring_size / 4);
|
||||
rb_cntl = rb_bufsz << 1;
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
|
||||
|
||||
/* Initialize the ring buffer's read and write pointers */
|
||||
WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
|
||||
WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
|
||||
|
||||
/* set the wb address whether it's enabled or not */
|
||||
WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
|
||||
upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
|
||||
WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
|
||||
((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
|
||||
|
||||
WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
|
||||
WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
|
||||
|
||||
ring->wptr = 0;
|
||||
WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
|
||||
|
||||
ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
|
||||
|
||||
/* enable DMA RB */
|
||||
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
|
||||
|
||||
ib_cntl = SDMA_IB_ENABLE;
|
||||
#ifdef __BIG_ENDIAN
|
||||
ib_cntl |= SDMA_IB_SWAP_ENABLE;
|
||||
#endif
|
||||
/* enable DMA IBs */
|
||||
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
|
||||
|
||||
ring->ready = true;
|
||||
|
||||
r = radeon_ring_test(rdev, ring->idx, ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_rlc_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the compute DMA queues and enable them (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
|
||||
{
|
||||
/* XXX todo */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_load_microcode - load the sDMA ME ucode
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Loads the sDMA0/1 ucode.
|
||||
* Returns 0 for success, -EINVAL if the ucode is not available.
|
||||
*/
|
||||
static int cik_sdma_load_microcode(struct radeon_device *rdev)
|
||||
{
|
||||
const __be32 *fw_data;
|
||||
int i;
|
||||
|
||||
if (!rdev->sdma_fw)
|
||||
return -EINVAL;
|
||||
|
||||
/* stop the gfx rings and rlc compute queues */
|
||||
cik_sdma_gfx_stop(rdev);
|
||||
cik_sdma_rlc_stop(rdev);
|
||||
|
||||
/* halt the MEs */
|
||||
cik_sdma_enable(rdev, false);
|
||||
|
||||
/* sdma0 */
|
||||
fw_data = (const __be32 *)rdev->sdma_fw->data;
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
|
||||
for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
|
||||
|
||||
/* sdma1 */
|
||||
fw_data = (const __be32 *)rdev->sdma_fw->data;
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
|
||||
for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
|
||||
WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
|
||||
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
|
||||
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the DMA engines and enable them (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int cik_sdma_resume(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* Reset dma */
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
|
||||
r = cik_sdma_load_microcode(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* unhalt the MEs */
|
||||
cik_sdma_enable(rdev, true);
|
||||
|
||||
/* start the gfx rings and rlc compute queues */
|
||||
r = cik_sdma_gfx_resume(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
r = cik_sdma_rlc_resume(rdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_fini - tear down the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engines and free the rings (CIK).
|
||||
*/
|
||||
void cik_sdma_fini(struct radeon_device *rdev)
|
||||
{
|
||||
/* stop the gfx rings and rlc compute queues */
|
||||
cik_sdma_gfx_stop(rdev);
|
||||
cik_sdma_rlc_stop(rdev);
|
||||
/* halt the MEs */
|
||||
cik_sdma_enable(rdev, false);
|
||||
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
|
||||
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
|
||||
/* XXX - compute dma queue tear down */
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (CIK).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int cik_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_bytes, cur_size_in_bytes;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
|
||||
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_bytes = size_in_bytes;
|
||||
if (cur_size_in_bytes > 0x1fffff)
|
||||
cur_size_in_bytes = 0x1fffff;
|
||||
size_in_bytes -= cur_size_in_bytes;
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
|
||||
radeon_ring_write(ring, cur_size_in_bytes);
|
||||
radeon_ring_write(ring, 0); /* src/dst endian swap */
|
||||
radeon_ring_write(ring, src_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
|
||||
src_offset += cur_size_in_bytes;
|
||||
dst_offset += cur_size_in_bytes;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_ring_test - simple async dma engine test
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test the DMA engine by using it to write a
* value to memory (CIK).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int cik_sdma_ring_test(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ring_lock(rdev, ring, 4);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
|
||||
return r;
|
||||
}
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
|
||||
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
|
||||
radeon_ring_write(ring, 1); /* number of DWs to follow */
|
||||
radeon_ring_write(ring, 0xDEADBEEF);
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
|
||||
ring->idx, tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_ib_test - test an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test a simple IB in the DMA ring (CIK).
|
||||
* Returns 0 on success, error on failure.
|
||||
*/
|
||||
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
struct radeon_ib ib;
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp = 0;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
|
||||
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
|
||||
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
|
||||
ib.ptr[3] = 1;
|
||||
ib.ptr[4] = 0xDEADBEEF;
|
||||
ib.length_dw = 5;
|
||||
|
||||
r = radeon_ib_schedule(rdev, &ib, NULL);
|
||||
if (r) {
|
||||
radeon_ib_free(rdev, &ib);
|
||||
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_fence_wait(ib.fence, false);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
radeon_ib_free(rdev, &ib);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up (CIK).
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = cik_gpu_check_soft_reset(rdev);
|
||||
u32 mask;
|
||||
|
||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||
mask = RADEON_RESET_DMA;
|
||||
else
|
||||
mask = RADEON_RESET_DMA1;
|
||||
|
||||
if (!(reset_mask & mask)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_sdma_vm_set_page - update the page tables using sDMA
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: indirect buffer to fill with commands
|
||||
* @pe: addr of the page entry
|
||||
* @addr: dst addr to write into pe
|
||||
* @count: number of page entries to update
|
||||
* @incr: increase next addr by incr bytes
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update the page tables using sDMA (CIK).
|
||||
*/
|
||||
void cik_sdma_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags)
|
||||
{
|
||||
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
|
||||
uint64_t value;
|
||||
unsigned ndw;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
/* for non-physically contiguous pages (system) */
|
||||
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
|
||||
ib->ptr[ib->length_dw++] = pe;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
|
||||
ib->ptr[ib->length_dw++] = ndw;
|
||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
value = radeon_vm_map_gart(rdev, addr);
|
||||
value &= 0xFFFFFFFFFFFFF000ULL;
|
||||
} else if (flags & RADEON_VM_PAGE_VALID) {
|
||||
value = addr;
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
addr += incr;
|
||||
value |= r600_flags;
|
||||
ib->ptr[ib->length_dw++] = value;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
while (count) {
|
||||
ndw = count;
|
||||
if (ndw > 0x7FFFF)
|
||||
ndw = 0x7FFFF;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_VALID)
|
||||
value = addr;
|
||||
else
|
||||
value = 0;
|
||||
/* for physically contiguous pages (vram) */
|
||||
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
|
||||
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
|
||||
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = value; /* value */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = ndw; /* number of entries */
|
||||
pe += ndw * 8;
|
||||
addr += ndw * incr;
|
||||
count -= ndw;
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* cik_dma_vm_flush - cik vm flush using sDMA
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Update the page table base and flush the VM TLB
|
||||
* using sDMA (CIK).
|
||||
*/
|
||||
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ridx];
|
||||
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
|
||||
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
|
||||
u32 ref_and_mask;
|
||||
|
||||
if (vm == NULL)
|
||||
return;
|
||||
|
||||
if (ridx == R600_RING_TYPE_DMA_INDEX)
|
||||
ref_and_mask = SDMA0;
|
||||
else
|
||||
ref_and_mask = SDMA1;
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
if (vm->id < 8) {
|
||||
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
|
||||
} else {
|
||||
radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
|
||||
}
|
||||
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
|
||||
|
||||
/* update SH_MEM_* regs */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
|
||||
radeon_ring_write(ring, VMID(vm->id));
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_BASES >> 2);
|
||||
radeon_ring_write(ring, 0);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
|
||||
radeon_ring_write(ring, 0);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
|
||||
radeon_ring_write(ring, 1);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
|
||||
radeon_ring_write(ring, 0);
|
||||
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
|
||||
radeon_ring_write(ring, VMID(0));
|
||||
|
||||
/* flush HDP */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
|
||||
radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
|
||||
radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
|
||||
radeon_ring_write(ring, ref_and_mask); /* MASK */
|
||||
radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
|
||||
|
||||
/* flush TLB */
|
||||
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
|
||||
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
|
||||
radeon_ring_write(ring, 1 << vm->id);
|
||||
}
|
||||
|
|
@ -3613,7 +3613,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev)
|
|||
return true;
|
||||
}
|
||||
|
||||
static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
{
|
||||
u32 reset_mask = 0;
|
||||
u32 tmp;
|
||||
|
@ -3839,28 +3839,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
|
|||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* evergreen_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
|
||||
|
||||
if (!(reset_mask & RADEON_RESET_DMA)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/*
|
||||
* RLC
|
||||
*/
|
||||
|
@ -5024,143 +5002,6 @@ restart_ih:
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/**
|
||||
* evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Add a DMA fence packet to the ring to write
|
||||
* the fence seq number and DMA trap packet to generate
|
||||
* an interrupt if needed (evergreen-SI).
|
||||
*/
|
||||
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
||||
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
||||
/* write the fence */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
|
||||
radeon_ring_write(ring, addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
|
||||
radeon_ring_write(ring, fence->seq);
|
||||
/* generate an interrupt */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
|
||||
/* flush HDP */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
|
||||
radeon_ring_write(ring, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (evergreen).
|
||||
*/
|
||||
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 4;
|
||||
while ((next_rptr & 7) != 5)
|
||||
next_rptr++;
|
||||
next_rptr += 3;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
|
||||
* Pad as necessary with NOPs.
|
||||
*/
|
||||
while ((ring->wptr & 7) != 5)
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
|
||||
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
|
||||
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* evergreen_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU paging using the DMA engine (evergreen-cayman).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int evergreen_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_dw, cur_size_in_dw;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
|
||||
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_dw = size_in_dw;
|
||||
if (cur_size_in_dw > 0xFFFFF)
|
||||
cur_size_in_dw = 0xFFFFF;
|
||||
size_in_dw -= cur_size_in_dw;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, src_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
|
||||
src_offset += cur_size_in_dw * 4;
|
||||
dst_offset += cur_size_in_dw * 4;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int evergreen_startup(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring;
@ -0,0 +1,190 @@
/*
|
||||
* Copyright 2010 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "evergreend.h"
|
||||
|
||||
u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
|
||||
|
||||
/**
|
||||
* evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Add a DMA fence packet to the ring to write
|
||||
* the fence seq number and DMA trap packet to generate
|
||||
* an interrupt if needed (evergreen-SI).
|
||||
*/
|
||||
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
||||
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
||||
/* write the fence */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
|
||||
radeon_ring_write(ring, addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
|
||||
radeon_ring_write(ring, fence->seq);
|
||||
/* generate an interrupt */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
|
||||
/* flush HDP */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
|
||||
radeon_ring_write(ring, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (evergreen).
|
||||
*/
|
||||
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 4;
|
||||
while ((next_rptr & 7) != 5)
|
||||
next_rptr++;
|
||||
next_rptr += 3;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
|
||||
* Pad as necessary with NOPs.
|
||||
*/
|
||||
while ((ring->wptr & 7) != 5)
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
|
||||
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
|
||||
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
|
||||
|
||||
}
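For context on the loop just above: NOPs are written until (wptr & 7) == 5 so that the 3-dword INDIRECT_BUFFER packet which follows ends exactly on an 8-dword boundary. A self-contained sketch (not part of the patch; the helper is local to the sketch) of how many NOPs that costs for each starting alignment:

#include <stdio.h>

/* Illustrative only: NOPs required so that a 3-dword packet written at the
 * resulting position ends on an 8-dword boundary (target phase 5). */
static unsigned nops_to_phase(unsigned wptr, unsigned phase)
{
        unsigned n = 0;

        while (((wptr + n) & 7) != phase)
                n++;
        return n;
}

int main(void)
{
        unsigned wptr;

        for (wptr = 0; wptr < 8; wptr++)
                printf("wptr %% 8 == %u -> %u NOP(s)\n", wptr, nops_to_phase(wptr, 5));
        return 0;
}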
/**
|
||||
* evergreen_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU paging using the DMA engine (evergreen-cayman).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int evergreen_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_dw, cur_size_in_dw;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
|
||||
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_dw = size_in_dw;
|
||||
if (cur_size_in_dw > 0xFFFFF)
|
||||
cur_size_in_dw = 0xFFFFF;
|
||||
size_in_dw -= cur_size_in_dw;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, src_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
|
||||
src_offset += cur_size_in_dw * 4;
|
||||
dst_offset += cur_size_in_dw * 4;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* evergreen_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
|
||||
|
||||
if (!(reset_mask & RADEON_RESET_DMA)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
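For context on evergreen_copy_dma above: the transfer is expressed in dwords and split into COPY packets of at most 0xFFFFF dwords each. A stand-alone sketch of that chunking arithmetic (not part of the patch; the 4 KiB page-shift constant is an assumption of this sketch, mirroring the GPU page size):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12          /* assumption: 4 KiB GPU pages */
#define MAX_COPY_DW    0xFFFFFu    /* per-packet limit used by the copy loop */

int main(void)
{
        unsigned num_pages = 1024;                              /* 4 MiB transfer */
        uint32_t size_in_dw = (num_pages << GPU_PAGE_SHIFT) / 4;
        uint32_t num_loops = (size_in_dw + MAX_COPY_DW - 1) / MAX_COPY_DW;
        uint32_t dw;

        printf("%u dwords in %u copy packet(s)\n", size_in_dw, num_loops);
        while (size_in_dw) {
                dw = size_in_dw > MAX_COPY_DW ? MAX_COPY_DW : size_in_dw;
                size_in_dw -= dw;
                printf("  packet copies %u dwords\n", dw);
        }
        return 0;
}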
@ -174,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
|
||||
extern void sumo_rlc_fini(struct radeon_device *rdev);
|
||||
extern int sumo_rlc_init(struct radeon_device *rdev);
|
||||
extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags);
|
||||
|
||||
/* Firmware Names */
|
||||
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@ -1595,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* DMA
|
||||
* Starting with R600, the GPU has an asynchronous
|
||||
* DMA engine. The programming model is very similar
|
||||
* to the 3D engine (ring buffer, IBs, etc.), but the
|
||||
* DMA controller has it's own packet format that is
|
||||
* different form the PM4 format used by the 3D engine.
|
||||
* It supports copying data, writing embedded data,
|
||||
* solid fills, and a number of other things. It also
|
||||
* has support for tiling/detiling of buffers.
|
||||
* Cayman and newer support two asynchronous DMA engines.
|
||||
*/
|
||||
/**
|
||||
* cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (cayman-SI).
|
||||
*/
|
||||
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 4;
|
||||
while ((next_rptr & 7) != 5)
|
||||
next_rptr++;
|
||||
next_rptr += 3;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
|
||||
* Pad as necessary with NOPs.
|
||||
*/
|
||||
while ((ring->wptr & 7) != 5)
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
|
||||
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
|
||||
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
|
||||
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_stop - stop the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engines (cayman-SI).
|
||||
*/
|
||||
void cayman_dma_stop(struct radeon_device *rdev)
|
||||
{
|
||||
u32 rb_cntl;
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
|
||||
/* dma0 */
|
||||
rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
|
||||
rb_cntl &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
|
||||
|
||||
/* dma1 */
|
||||
rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
|
||||
rb_cntl &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
|
||||
|
||||
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
|
||||
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the DMA ring buffers and enable them. (cayman-SI).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int cayman_dma_resume(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring;
|
||||
u32 rb_cntl, dma_cntl, ib_cntl;
|
||||
u32 rb_bufsz;
|
||||
u32 reg_offset, wb_offset;
|
||||
int i, r;
|
||||
|
||||
/* Reset dma */
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0) {
|
||||
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
||||
reg_offset = DMA0_REGISTER_OFFSET;
|
||||
wb_offset = R600_WB_DMA_RPTR_OFFSET;
|
||||
} else {
|
||||
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
|
||||
reg_offset = DMA1_REGISTER_OFFSET;
|
||||
wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
|
||||
}
|
||||
|
||||
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
|
||||
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
rb_bufsz = drm_order(ring->ring_size / 4);
|
||||
rb_cntl = rb_bufsz << 1;
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
|
||||
|
||||
/* Initialize the ring buffer's read and write pointers */
|
||||
WREG32(DMA_RB_RPTR + reg_offset, 0);
|
||||
WREG32(DMA_RB_WPTR + reg_offset, 0);
|
||||
|
||||
/* set the wb address whether it's enabled or not */
|
||||
WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
|
||||
upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
|
||||
WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
|
||||
((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
|
||||
|
||||
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
|
||||
|
||||
/* enable DMA IBs */
|
||||
ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
|
||||
#ifdef __BIG_ENDIAN
|
||||
ib_cntl |= DMA_IB_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
|
||||
|
||||
dma_cntl = RREG32(DMA_CNTL + reg_offset);
|
||||
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
|
||||
WREG32(DMA_CNTL + reg_offset, dma_cntl);
|
||||
|
||||
ring->wptr = 0;
|
||||
WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
|
||||
|
||||
ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
|
||||
|
||||
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
|
||||
|
||||
ring->ready = true;
|
||||
|
||||
r = radeon_ring_test(rdev, ring->idx, ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_fini - tear down the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engines and free the rings (cayman-SI).
|
||||
*/
|
||||
void cayman_dma_fini(struct radeon_device *rdev)
|
||||
{
|
||||
cayman_dma_stop(rdev);
|
||||
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
|
||||
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
|
||||
}
|
||||
|
||||
static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
{
|
||||
u32 reset_mask = 0;
|
||||
u32 tmp;
@ -2027,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
|
||||
u32 mask;
|
||||
|
||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||
mask = RADEON_RESET_DMA;
|
||||
else
|
||||
mask = RADEON_RESET_DMA1;
|
||||
|
||||
if (!(reset_mask & mask)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
static int cayman_startup(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@ -2658,61 +2456,7 @@ void cayman_vm_set_page(struct radeon_device *rdev,
}
|
||||
}
|
||||
} else {
|
||||
if ((flags & RADEON_VM_PAGE_SYSTEM) ||
|
||||
(count == 1)) {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
/* for non-physically contiguous pages (system) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
|
||||
ib->ptr[ib->length_dw++] = pe;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
value = radeon_vm_map_gart(rdev, addr);
|
||||
value &= 0xFFFFFFFFFFFFF000ULL;
|
||||
} else if (flags & RADEON_VM_PAGE_VALID) {
|
||||
value = addr;
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
addr += incr;
|
||||
value |= r600_flags;
|
||||
ib->ptr[ib->length_dw++] = value;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
|
||||
} else {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_VALID)
|
||||
value = addr;
|
||||
else
|
||||
value = 0;
|
||||
/* for physically contiguous pages (vram) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
|
||||
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = value; /* value */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
pe += ndw * 4;
|
||||
addr += (ndw / 2) * incr;
|
||||
count -= ndw / 2;
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
|
||||
cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
|
||||
}
|
||||
}
@ -2746,26 +2490,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
|
||||
radeon_ring_write(ring, 0x0);
|
||||
}
|
||||
|
||||
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ridx];
|
||||
|
||||
if (vm == NULL)
|
||||
return;
|
||||
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
|
||||
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
|
||||
|
||||
/* flush hdp cache */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
|
||||
radeon_ring_write(ring, 1);
|
||||
|
||||
/* bits 0-7 are the VM contexts0-7 */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
|
||||
radeon_ring_write(ring, 1 << vm->id);
|
||||
}
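A note on the page-table write path above (removed here and re-added in the new DMA file further down): the system-page branch emits a 3-dword WRITE header per batch plus two dwords per PTE, with the per-packet dword budget capped at 0xFFFFE. A stand-alone sketch (not part of the patch; the helper is local to the sketch) of how many packets a given PTE count needs under that cap:

#include <stdio.h>

#define MAX_NDW 0xFFFFEu   /* dword cap used by the write path */

/* Illustrative only: number of WRITE packets needed for 'count' PTEs when
 * each PTE costs two dwords and a packet carries at most MAX_NDW of them. */
static unsigned write_packets_for(unsigned count)
{
        unsigned packets = 0;

        while (count) {
                unsigned ndw = count * 2;

                if (ndw > MAX_NDW)
                        ndw = MAX_NDW;
                count -= ndw / 2;
                packets++;
        }
        return packets;
}

int main(void)
{
        printf("%u PTEs -> %u packet(s)\n", 600000u, write_packets_for(600000u));
        return 0;
}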
@ -0,0 +1,338 @@
/*
|
||||
* Copyright 2010 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "nid.h"
|
||||
|
||||
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
|
||||
|
||||
/*
|
||||
* DMA
|
||||
* Starting with R600, the GPU has an asynchronous
|
||||
* DMA engine. The programming model is very similar
|
||||
* to the 3D engine (ring buffer, IBs, etc.), but the
|
||||
* DMA controller has it's own packet format that is
|
||||
* different form the PM4 format used by the 3D engine.
|
||||
* It supports copying data, writing embedded data,
|
||||
* solid fills, and a number of other things. It also
|
||||
* has support for tiling/detiling of buffers.
|
||||
* Cayman and newer support two asynchronous DMA engines.
|
||||
*/
|
||||
|
||||
/**
|
||||
* cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (cayman-SI).
|
||||
*/
|
||||
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 4;
|
||||
while ((next_rptr & 7) != 5)
|
||||
next_rptr++;
|
||||
next_rptr += 3;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
|
||||
* Pad as necessary with NOPs.
|
||||
*/
|
||||
while ((ring->wptr & 7) != 5)
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
|
||||
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
|
||||
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
|
||||
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_stop - stop the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engines (cayman-SI).
|
||||
*/
|
||||
void cayman_dma_stop(struct radeon_device *rdev)
|
||||
{
|
||||
u32 rb_cntl;
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
|
||||
/* dma0 */
|
||||
rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
|
||||
rb_cntl &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
|
||||
|
||||
/* dma1 */
|
||||
rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
|
||||
rb_cntl &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
|
||||
|
||||
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
|
||||
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_resume - setup and start the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the DMA ring buffers and enable them. (cayman-SI).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int cayman_dma_resume(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring;
|
||||
u32 rb_cntl, dma_cntl, ib_cntl;
|
||||
u32 rb_bufsz;
|
||||
u32 reg_offset, wb_offset;
|
||||
int i, r;
|
||||
|
||||
/* Reset dma */
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i == 0) {
|
||||
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
||||
reg_offset = DMA0_REGISTER_OFFSET;
|
||||
wb_offset = R600_WB_DMA_RPTR_OFFSET;
|
||||
} else {
|
||||
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
|
||||
reg_offset = DMA1_REGISTER_OFFSET;
|
||||
wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
|
||||
}
|
||||
|
||||
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
|
||||
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
rb_bufsz = drm_order(ring->ring_size / 4);
|
||||
rb_cntl = rb_bufsz << 1;
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
|
||||
|
||||
/* Initialize the ring buffer's read and write pointers */
|
||||
WREG32(DMA_RB_RPTR + reg_offset, 0);
|
||||
WREG32(DMA_RB_WPTR + reg_offset, 0);
|
||||
|
||||
/* set the wb address whether it's enabled or not */
|
||||
WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
|
||||
upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
|
||||
WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
|
||||
((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
|
||||
|
||||
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
|
||||
|
||||
/* enable DMA IBs */
|
||||
ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
|
||||
#ifdef __BIG_ENDIAN
|
||||
ib_cntl |= DMA_IB_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
|
||||
|
||||
dma_cntl = RREG32(DMA_CNTL + reg_offset);
|
||||
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
|
||||
WREG32(DMA_CNTL + reg_offset, dma_cntl);
|
||||
|
||||
ring->wptr = 0;
|
||||
WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
|
||||
|
||||
ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
|
||||
|
||||
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
|
||||
|
||||
ring->ready = true;
|
||||
|
||||
r = radeon_ring_test(rdev, ring->idx, ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_fini - tear down the async dma engines
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engines and free the rings (cayman-SI).
|
||||
*/
|
||||
void cayman_dma_fini(struct radeon_device *rdev)
|
||||
{
|
||||
cayman_dma_stop(rdev);
|
||||
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
|
||||
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
|
||||
u32 mask;
|
||||
|
||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||
mask = RADEON_RESET_DMA;
|
||||
else
|
||||
mask = RADEON_RESET_DMA1;
|
||||
|
||||
if (!(reset_mask & mask)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* cayman_dma_vm_set_page - update the page tables using the DMA
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: indirect buffer to fill with commands
|
||||
* @pe: addr of the page entry
|
||||
* @addr: dst addr to write into pe
|
||||
* @count: number of page entries to update
|
||||
* @incr: increase next addr by incr bytes
|
||||
* @flags: access flags
|
||||
* @r600_flags: hw access flags
|
||||
*
|
||||
* Update the page tables using the DMA (cayman/TN).
|
||||
*/
|
||||
void cayman_dma_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags)
|
||||
{
|
||||
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
|
||||
uint64_t value;
|
||||
unsigned ndw;
|
||||
|
||||
if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
/* for non-physically contiguous pages (system) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
|
||||
ib->ptr[ib->length_dw++] = pe;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
value = radeon_vm_map_gart(rdev, addr);
|
||||
value &= 0xFFFFFFFFFFFFF000ULL;
|
||||
} else if (flags & RADEON_VM_PAGE_VALID) {
|
||||
value = addr;
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
addr += incr;
|
||||
value |= r600_flags;
|
||||
ib->ptr[ib->length_dw++] = value;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_VALID)
|
||||
value = addr;
|
||||
else
|
||||
value = 0;
|
||||
/* for physically contiguous pages (vram) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
|
||||
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = value; /* value */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
pe += ndw * 4;
|
||||
addr += (ndw / 2) * incr;
|
||||
count -= ndw / 2;
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
|
||||
}
|
||||
|
||||
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ridx];
|
||||
|
||||
if (vm == NULL)
|
||||
return;
|
||||
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
|
||||
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
|
||||
|
||||
/* flush hdp cache */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
|
||||
radeon_ring_write(ring, 1);
|
||||
|
||||
/* bits 0-7 are the VM contexts0-7 */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
|
||||
radeon_ring_write(ring, 1 << vm->id);
|
||||
}
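For readers tracing cayman_dma_resume above: the ring size is programmed as a log2 value, rb_bufsz = drm_order(ring_size / 4), shifted left by one into DMA_RB_CNTL. The sketch below (not part of the patch) uses a local ceil-log2 helper as a stand-in for drm_order; that equivalence, and the example ring size, are assumptions of the sketch:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for drm_order(): smallest order such that (1 << order) >= size. */
static unsigned order_ceil_log2(uint32_t size)
{
        unsigned order = 0;

        while ((1u << order) < size)
                order++;
        return order;
}

int main(void)
{
        uint32_t ring_size_bytes = 64 * 1024;              /* assumed ring size */
        uint32_t size_in_dw = ring_size_bytes / 4;
        uint32_t rb_bufsz = order_ceil_log2(size_in_dw);
        uint32_t rb_cntl = rb_bufsz << 1;                  /* value for DMA_RB_CNTL */

        printf("rb_bufsz=%u rb_cntl=0x%x\n", rb_bufsz, rb_cntl);
        return 0;
}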
@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev)
return true;
|
||||
}
|
||||
|
||||
static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
{
|
||||
u32 reset_mask = 0;
|
||||
u32 tmp;
@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = r600_gpu_check_soft_reset(rdev);
|
||||
|
||||
if (!(reset_mask & RADEON_RESET_DMA)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
|
||||
u32 tiling_pipe_num,
|
||||
u32 max_rb_num,
@ -2493,176 +2471,6 @@ void r600_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* DMA
|
||||
* Starting with R600, the GPU has an asynchronous
|
||||
* DMA engine. The programming model is very similar
|
||||
* to the 3D engine (ring buffer, IBs, etc.), but the
|
||||
* DMA controller has it's own packet format that is
|
||||
* different form the PM4 format used by the 3D engine.
|
||||
* It supports copying data, writing embedded data,
|
||||
* solid fills, and a number of other things. It also
|
||||
* has support for tiling/detiling of buffers.
|
||||
*/
|
||||
|
||||
/**
|
||||
* r600_dma_get_rptr - get the current read pointer
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon ring pointer
|
||||
*
|
||||
* Get the current rptr from the hardware (r6xx+).
|
||||
*/
|
||||
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_get_wptr - get the current write pointer
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon ring pointer
|
||||
*
|
||||
* Get the current wptr from the hardware (r6xx+).
|
||||
*/
|
||||
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_set_wptr - commit the write pointer
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon ring pointer
|
||||
*
|
||||
* Write the wptr back to the hardware (r6xx+).
|
||||
*/
|
||||
void r600_dma_set_wptr(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_stop - stop the async dma engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engine (r6xx-evergreen).
|
||||
*/
|
||||
void r600_dma_stop(struct radeon_device *rdev)
|
||||
{
|
||||
u32 rb_cntl = RREG32(DMA_RB_CNTL);
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
|
||||
rb_cntl &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL, rb_cntl);
|
||||
|
||||
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_resume - setup and start the async dma engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the DMA ring buffer and enable it. (r6xx-evergreen).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int r600_dma_resume(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
||||
u32 rb_cntl, dma_cntl, ib_cntl;
|
||||
u32 rb_bufsz;
|
||||
int r;
|
||||
|
||||
/* Reset dma */
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
|
||||
else
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
|
||||
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
|
||||
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
rb_bufsz = drm_order(ring->ring_size / 4);
|
||||
rb_cntl = rb_bufsz << 1;
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_RB_CNTL, rb_cntl);
|
||||
|
||||
/* Initialize the ring buffer's read and write pointers */
|
||||
WREG32(DMA_RB_RPTR, 0);
|
||||
WREG32(DMA_RB_WPTR, 0);
|
||||
|
||||
/* set the wb address whether it's enabled or not */
|
||||
WREG32(DMA_RB_RPTR_ADDR_HI,
|
||||
upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
|
||||
WREG32(DMA_RB_RPTR_ADDR_LO,
|
||||
((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
|
||||
|
||||
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
|
||||
|
||||
/* enable DMA IBs */
|
||||
ib_cntl = DMA_IB_ENABLE;
|
||||
#ifdef __BIG_ENDIAN
|
||||
ib_cntl |= DMA_IB_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_IB_CNTL, ib_cntl);
|
||||
|
||||
dma_cntl = RREG32(DMA_CNTL);
|
||||
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
|
||||
WREG32(DMA_CNTL, dma_cntl);
|
||||
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(DMA_MODE, 1);
|
||||
|
||||
ring->wptr = 0;
|
||||
WREG32(DMA_RB_WPTR, ring->wptr << 2);
|
||||
|
||||
ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
|
||||
|
||||
WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
|
||||
|
||||
ring->ready = true;
|
||||
|
||||
r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_fini - tear down the async dma engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engine and free the ring (r6xx-evergreen).
|
||||
*/
|
||||
void r600_dma_fini(struct radeon_device *rdev)
|
||||
{
|
||||
r600_dma_stop(rdev);
|
||||
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
|
||||
}
|
||||
|
||||
/*
|
||||
* GPU scratch registers helpers function.
|
||||
*/
@ -2718,60 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_ring_test - simple async dma engine test
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test the DMA engine by writing using it to write an
|
||||
* value to memory. (r6xx-SI).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int r600_dma_ring_test(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ring_lock(rdev, ring, 4);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
|
||||
return r;
|
||||
}
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
|
||||
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, 0xDEADBEEF);
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
|
||||
ring->idx, tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* CP fences/semaphores
|
||||
*/
@ -2839,59 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
|
||||
}
|
||||
|
||||
/*
|
||||
* DMA fences/semaphores
|
||||
*/
|
||||
|
||||
/**
|
||||
* r600_dma_fence_ring_emit - emit a fence on the DMA ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Add a DMA fence packet to the ring to write
|
||||
* the fence seq number and DMA trap packet to generate
|
||||
* an interrupt if needed (r6xx-r7xx).
|
||||
*/
|
||||
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
||||
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
||||
|
||||
/* write the fence */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
|
||||
radeon_ring_write(ring, addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
|
||||
radeon_ring_write(ring, lower_32_bits(fence->seq));
|
||||
/* generate an interrupt */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
* @semaphore: radeon semaphore object
|
||||
* @emit_wait: wait or signal semaphore
|
||||
*
|
||||
* Add a DMA semaphore packet to the ring wait on or signal
|
||||
* other rings (r6xx-SI).
|
||||
*/
|
||||
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring,
|
||||
struct radeon_semaphore *semaphore,
|
||||
bool emit_wait)
|
||||
{
|
||||
u64 addr = semaphore->gpu_addr;
|
||||
u32 s = emit_wait ? 0 : 1;
|
||||
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
|
||||
radeon_ring_write(ring, addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_copy_cpdma - copy pages using the CP DMA engine
|
||||
*
@ -2976,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU paging using the DMA engine (r6xx).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int r600_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_dw, cur_size_in_dw;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
|
||||
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_dw = size_in_dw;
|
||||
if (cur_size_in_dw > 0xFFFE)
|
||||
cur_size_in_dw = 0xFFFE;
|
||||
size_in_dw -= cur_size_in_dw;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, src_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
|
||||
(upper_32_bits(src_offset) & 0xff)));
|
||||
src_offset += cur_size_in_dw * 4;
|
||||
dst_offset += cur_size_in_dw * 4;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
|
||||
uint32_t tiling_flags, uint32_t pitch,
|
||||
uint32_t offset, uint32_t obj_size)
@ -3409,104 +3036,6 @@ free_scratch:
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_ib_test - test an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test a simple IB in the DMA ring (r6xx-SI).
|
||||
* Returns 0 on success, error on failure.
|
||||
*/
|
||||
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
struct radeon_ib ib;
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp = 0;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
|
||||
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
|
||||
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
|
||||
ib.ptr[3] = 0xDEADBEEF;
|
||||
ib.length_dw = 4;
|
||||
|
||||
r = radeon_ib_schedule(rdev, &ib, NULL);
|
||||
if (r) {
|
||||
radeon_ib_free(rdev, &ib);
|
||||
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_fence_wait(ib.fence, false);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
radeon_ib_free(rdev, &ib);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (r6xx-r7xx).
|
||||
*/
|
||||
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 4;
|
||||
while ((next_rptr & 7) != 5)
|
||||
next_rptr++;
|
||||
next_rptr += 3;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
|
||||
* Pad as necessary with NOPs.
|
||||
*/
|
||||
while ((ring->wptr & 7) != 5)
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
|
||||
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
|
||||
radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
|
||||
|
||||
}
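Comparing the r6xx copy path above with the evergreen variant earlier: r600_copy_dma caps each COPY packet at 0xFFFE dwords and reserves num_loops * 4 + 8 ring dwords, while evergreen_copy_dma uses a 0xFFFFF cap and num_loops * 5 + 11. A small stand-alone comparison of the two reservations for the same transfer (not part of the patch; the example size is arbitrary):

#include <stdio.h>

static unsigned div_round_up(unsigned n, unsigned d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        unsigned size_in_dw = (256u << 12) / 4;            /* 256 pages of 4 KiB, in dwords */
        unsigned r600_loops = div_round_up(size_in_dw, 0xFFFE);
        unsigned evg_loops  = div_round_up(size_in_dw, 0xFFFFF);

        printf("r6xx:      %u loop(s), %u ring dwords reserved\n", r600_loops, r600_loops * 4 + 8);
        printf("evergreen: %u loop(s), %u ring dwords reserved\n", evg_loops, evg_loops * 5 + 11);
        return 0;
}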
/*
|
||||
* Interrupts
|
||||
*
@ -0,0 +1,497 @@
/*
|
||||
* Copyright 2013 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "r600d.h"
|
||||
|
||||
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
|
||||
|
||||
/*
|
||||
* DMA
|
||||
* Starting with R600, the GPU has an asynchronous
|
||||
* DMA engine. The programming model is very similar
|
||||
* to the 3D engine (ring buffer, IBs, etc.), but the
|
||||
* DMA controller has it's own packet format that is
|
||||
* different form the PM4 format used by the 3D engine.
|
||||
* It supports copying data, writing embedded data,
|
||||
* solid fills, and a number of other things. It also
|
||||
* has support for tiling/detiling of buffers.
|
||||
*/
|
||||
|
||||
/**
|
||||
* r600_dma_get_rptr - get the current read pointer
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon ring pointer
|
||||
*
|
||||
* Get the current rptr from the hardware (r6xx+).
|
||||
*/
|
||||
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_get_wptr - get the current write pointer
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon ring pointer
|
||||
*
|
||||
* Get the current wptr from the hardware (r6xx+).
|
||||
*/
|
||||
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_set_wptr - commit the write pointer
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon ring pointer
|
||||
*
|
||||
* Write the wptr back to the hardware (r6xx+).
|
||||
*/
|
||||
void r600_dma_set_wptr(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_stop - stop the async dma engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engine (r6xx-evergreen).
|
||||
*/
|
||||
void r600_dma_stop(struct radeon_device *rdev)
|
||||
{
|
||||
u32 rb_cntl = RREG32(DMA_RB_CNTL);
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
|
||||
|
||||
rb_cntl &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL, rb_cntl);
|
||||
|
||||
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_resume - setup and start the async dma engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Set up the DMA ring buffer and enable it. (r6xx-evergreen).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int r600_dma_resume(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
||||
u32 rb_cntl, dma_cntl, ib_cntl;
|
||||
u32 rb_bufsz;
|
||||
int r;
|
||||
|
||||
/* Reset dma */
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
|
||||
else
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
|
||||
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
|
||||
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
|
||||
|
||||
/* Set ring buffer size in dwords */
|
||||
rb_bufsz = drm_order(ring->ring_size / 4);
|
||||
rb_cntl = rb_bufsz << 1;
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_RB_CNTL, rb_cntl);
|
||||
|
||||
/* Initialize the ring buffer's read and write pointers */
|
||||
WREG32(DMA_RB_RPTR, 0);
|
||||
WREG32(DMA_RB_WPTR, 0);
|
||||
|
||||
/* set the wb address whether it's enabled or not */
|
||||
WREG32(DMA_RB_RPTR_ADDR_HI,
|
||||
upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
|
||||
WREG32(DMA_RB_RPTR_ADDR_LO,
|
||||
((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
|
||||
|
||||
if (rdev->wb.enabled)
|
||||
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
|
||||
|
||||
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
|
||||
|
||||
/* enable DMA IBs */
|
||||
ib_cntl = DMA_IB_ENABLE;
|
||||
#ifdef __BIG_ENDIAN
|
||||
ib_cntl |= DMA_IB_SWAP_ENABLE;
|
||||
#endif
|
||||
WREG32(DMA_IB_CNTL, ib_cntl);
|
||||
|
||||
dma_cntl = RREG32(DMA_CNTL);
|
||||
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
|
||||
WREG32(DMA_CNTL, dma_cntl);
|
||||
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(DMA_MODE, 1);
|
||||
|
||||
ring->wptr = 0;
|
||||
WREG32(DMA_RB_WPTR, ring->wptr << 2);
|
||||
|
||||
ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
|
||||
|
||||
WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
|
||||
|
||||
ring->ready = true;
|
||||
|
||||
r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
|
||||
if (r) {
|
||||
ring->ready = false;
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
|
||||
|
||||
return 0;
|
||||
}
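DMA_RB_CNTL wants the ring size encoded as the log2 of the size in dwords, shifted up by one bit. A standalone sketch of that encoding; the order() helper below is an assumed equivalent of drm_order() (smallest n with 1 << n >= size), not the DRM core function itself:

#include <stdint.h>
#include <stdio.h>

/* assumed equivalent of drm_order(): ceiling log2 */
static unsigned order(unsigned long size)
{
	unsigned n = 0;

	while ((1UL << n) < size)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 64 * 1024;		/* bytes, example value */
	uint32_t rb_bufsz = order(ring_size / 4);	/* log2 of size in dwords */
	uint32_t rb_cntl = rb_bufsz << 1;		/* size field of DMA_RB_CNTL */

	printf("rb_bufsz=%u rb_cntl=0x%08x\n", rb_bufsz, rb_cntl);
	return 0;
}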
|
||||
|
||||
/**
|
||||
* r600_dma_fini - tear down the async dma engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Stop the async dma engine and free the ring (r6xx-evergreen).
|
||||
*/
|
||||
void r600_dma_fini(struct radeon_device *rdev)
|
||||
{
|
||||
r600_dma_stop(rdev);
|
||||
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = r600_gpu_check_soft_reset(rdev);
|
||||
|
||||
if (!(reset_mask & RADEON_RESET_DMA)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* r600_dma_ring_test - simple async dma engine test
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test the DMA engine by using it to write a
* value to memory (r6xx-SI).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int r600_dma_ring_test(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring)
|
||||
{
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ring_lock(rdev, ring, 4);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
|
||||
return r;
|
||||
}
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
|
||||
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, 0xDEADBEEF);
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
|
||||
ring->idx, tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
return r;
|
||||
}
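The ring test emits a single four-dword WRITE packet: a header, the low 32 bits of the destination (dword aligned), the high 8 bits of the 40-bit address, and one data dword that the loop then polls for. A self-contained sketch of assembling such a packet; the header layout and opcode value are assumptions for illustration, not the real DMA_PACKET() macro from r600d.h:

#include <stdint.h>
#include <stdio.h>

#define FAKE_DMA_OP_WRITE 0x4	/* hypothetical opcode value, for illustration only */

/* assumed header layout: opcode in the top nibble, dword count in the low bits */
static uint32_t dma_write_header(unsigned ndw)
{
	return ((uint32_t)FAKE_DMA_OP_WRITE << 28) | (ndw & 0xFFFF);
}

int main(void)
{
	uint64_t gpu_addr = 0x1234567890ull;	/* example scratch address */
	uint32_t pkt[4];

	pkt[0] = dma_write_header(1);			/* write 1 dword */
	pkt[1] = (uint32_t)gpu_addr & 0xfffffffc;	/* low bits, dword aligned */
	pkt[2] = (uint32_t)(gpu_addr >> 32) & 0xff;	/* high 8 bits of the address */
	pkt[3] = 0xDEADBEEF;				/* payload the test polls for */

	for (int i = 0; i < 4; i++)
		printf("dw%d = 0x%08x\n", i, pkt[i]);
	return 0;
}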
|
||||
|
||||
/**
|
||||
* r600_dma_fence_ring_emit - emit a fence on the DMA ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Add a DMA fence packet to the ring to write
* the fence seq number and a DMA trap packet to generate
* an interrupt if needed (r6xx-r7xx).
|
||||
*/
|
||||
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
||||
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
||||
|
||||
/* write the fence */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
|
||||
radeon_ring_write(ring, addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
|
||||
radeon_ring_write(ring, lower_32_bits(fence->seq));
|
||||
/* generate an interrupt */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
* @semaphore: radeon semaphore object
|
||||
* @emit_wait: wait or signal semaphore
|
||||
*
|
||||
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (r6xx-SI).
|
||||
*/
|
||||
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
|
||||
struct radeon_ring *ring,
|
||||
struct radeon_semaphore *semaphore,
|
||||
bool emit_wait)
|
||||
{
|
||||
u64 addr = semaphore->gpu_addr;
|
||||
u32 s = emit_wait ? 0 : 1;
|
||||
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
|
||||
radeon_ring_write(ring, addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_ib_test - test an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Test a simple IB in the DMA ring (r6xx-SI).
|
||||
* Returns 0 on success, error on failure.
|
||||
*/
|
||||
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
struct radeon_ib ib;
|
||||
unsigned i;
|
||||
int r;
|
||||
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
|
||||
u32 tmp = 0;
|
||||
|
||||
if (!ptr) {
|
||||
DRM_ERROR("invalid vram scratch pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = 0xCAFEDEAD;
|
||||
writel(tmp, ptr);
|
||||
|
||||
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
|
||||
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
|
||||
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
|
||||
ib.ptr[3] = 0xDEADBEEF;
|
||||
ib.length_dw = 4;
|
||||
|
||||
r = radeon_ib_schedule(rdev, &ib, NULL);
|
||||
if (r) {
|
||||
radeon_ib_free(rdev, &ib);
|
||||
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
r = radeon_fence_wait(ib.fence, false);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
for (i = 0; i < rdev->usec_timeout; i++) {
|
||||
tmp = readl(ptr);
|
||||
if (tmp == 0xDEADBEEF)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
if (i < rdev->usec_timeout) {
|
||||
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
|
||||
} else {
|
||||
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
|
||||
r = -EINVAL;
|
||||
}
|
||||
radeon_ib_free(rdev, &ib);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: IB object to schedule
|
||||
*
|
||||
* Schedule an IB in the DMA ring (r6xx-r7xx).
|
||||
*/
|
||||
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ib->ring];
|
||||
|
||||
if (rdev->wb.enabled) {
|
||||
u32 next_rptr = ring->wptr + 4;
|
||||
while ((next_rptr & 7) != 5)
|
||||
next_rptr++;
|
||||
next_rptr += 3;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
|
||||
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
|
||||
radeon_ring_write(ring, next_rptr);
|
||||
}
|
||||
|
||||
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
|
||||
* Pad as necessary with NOPs.
|
||||
*/
|
||||
while ((ring->wptr & 7) != 5)
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
|
||||
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
|
||||
radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
|
||||
|
||||
}
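The padding loop above keeps (wptr & 7) == 5 before the three-dword INDIRECT_BUFFER packet, so the packet ends exactly on an 8-dword boundary. The same arithmetic as a tiny standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned wptr = 11;	/* example write pointer, in dwords */
	unsigned nops = 0;

	/* pad until a 3-dword packet starting here would end 8-dword aligned */
	while ((wptr & 7) != 5) {
		wptr++;		/* each pad dword is one NOP */
		nops++;
	}
	wptr += 3;		/* the INDIRECT_BUFFER packet itself */

	printf("padded with %u NOPs, wptr now %u (8-dword aligned: %s)\n",
	       nops, wptr, (wptr & 7) == 0 ? "yes" : "no");
	return 0;
}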
|
||||
|
||||
/**
|
||||
* r600_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (r6xx).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int r600_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_dw, cur_size_in_dw;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
|
||||
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_dw = size_in_dw;
|
||||
if (cur_size_in_dw > 0xFFFE)
|
||||
cur_size_in_dw = 0xFFFE;
|
||||
size_in_dw -= cur_size_in_dw;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, src_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
|
||||
(upper_32_bits(src_offset) & 0xff)));
|
||||
src_offset += cur_size_in_dw * 4;
|
||||
dst_offset += cur_size_in_dw * 4;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
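r600_copy_dma() splits the transfer into COPY packets of at most 0xFFFE dwords each and pre-reserves ring space for num_loops four-dword packets plus fence/semaphore overhead. The chunking arithmetic on its own, as a standalone sketch (the 4 KB GPU page size is an assumption):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define GPU_PAGE_SHIFT 12	/* assumed 4 KB GPU pages */
#define MAX_DW_PER_PKT 0xFFFE	/* r6xx DMA copy limit per packet */

int main(void)
{
	unsigned num_gpu_pages = 1000;
	unsigned size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = DIV_ROUND_UP(size_in_dw, MAX_DW_PER_PKT);
	unsigned ring_dw = num_loops * 4 + 8;	/* what radeon_ring_lock() reserves */

	printf("size=%u dw, %u copy packets, %u ring dwords reserved\n",
	       size_in_dw, num_loops, ring_dw);

	/* walk the chunks the same way the loop in r600_copy_dma() does */
	while (size_in_dw) {
		unsigned cur = size_in_dw > MAX_DW_PER_PKT ? MAX_DW_PER_PKT : size_in_dw;
		size_in_dw -= cur;
		/* one 4-dword COPY packet would be emitted here for 'cur' dwords */
	}
	return 0;
}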
|
|
@ -1650,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* rv770_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (r7xx).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int rv770_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_dw, cur_size_in_dw;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
|
||||
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_dw = size_in_dw;
|
||||
if (cur_size_in_dw > 0xFFFF)
|
||||
cur_size_in_dw = 0xFFFF;
|
||||
size_in_dw -= cur_size_in_dw;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, src_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
|
||||
src_offset += cur_size_in_dw * 4;
|
||||
dst_offset += cur_size_in_dw * 4;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int rv770_startup(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ring *ring;
|
||||
|
|
|
@ -0,0 +1,101 @@
|
|||
/*
|
||||
* Copyright 2013 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "rv770d.h"
|
||||
|
||||
/**
|
||||
* rv770_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (r7xx).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int rv770_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_dw, cur_size_in_dw;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
|
||||
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_dw = size_in_dw;
|
||||
if (cur_size_in_dw > 0xFFFF)
|
||||
cur_size_in_dw = 0xFFFF;
|
||||
size_in_dw -= cur_size_in_dw;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, src_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
|
||||
src_offset += cur_size_in_dw * 4;
|
||||
dst_offset += cur_size_in_dw * 4;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
|
@ -78,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
|
|||
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
|
||||
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
|
||||
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
|
||||
extern void si_dma_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags);
|
||||
|
||||
static const u32 verde_rlc_save_restore_register_list[] =
|
||||
{
|
||||
|
@ -3495,7 +3500,7 @@ static int si_cp_resume(struct radeon_device *rdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
{
|
||||
u32 reset_mask = 0;
|
||||
u32 tmp;
|
||||
|
@ -3744,34 +3749,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
|||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* si_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = si_gpu_check_soft_reset(rdev);
|
||||
u32 mask;
|
||||
|
||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||
mask = RADEON_RESET_DMA;
|
||||
else
|
||||
mask = RADEON_RESET_DMA1;
|
||||
|
||||
if (!(reset_mask & mask)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/* MC */
|
||||
static void si_mc_program(struct radeon_device *rdev)
|
||||
{
|
||||
|
@ -4710,58 +4687,7 @@ void si_vm_set_page(struct radeon_device *rdev,
|
|||
}
|
||||
} else {
|
||||
/* DMA */
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
/* for non-physically contiguous pages (system) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
|
||||
ib->ptr[ib->length_dw++] = pe;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
value = radeon_vm_map_gart(rdev, addr);
|
||||
value &= 0xFFFFFFFFFFFFF000ULL;
|
||||
} else if (flags & RADEON_VM_PAGE_VALID) {
|
||||
value = addr;
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
addr += incr;
|
||||
value |= r600_flags;
|
||||
ib->ptr[ib->length_dw++] = value;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_VALID)
|
||||
value = addr;
|
||||
else
|
||||
value = 0;
|
||||
/* for physically contiguous pages (vram) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
|
||||
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = value; /* value */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
pe += ndw * 4;
|
||||
addr += (ndw / 2) * incr;
|
||||
count -= ndw / 2;
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
|
||||
si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4808,32 +4734,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
|||
radeon_ring_write(ring, 0x0);
|
||||
}
|
||||
|
||||
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ridx];
|
||||
|
||||
if (vm == NULL)
|
||||
return;
|
||||
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
|
||||
if (vm->id < 8) {
|
||||
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
|
||||
} else {
|
||||
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
|
||||
}
|
||||
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
|
||||
|
||||
/* flush hdp cache */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
|
||||
radeon_ring_write(ring, 1);
|
||||
|
||||
/* bits 0-7 are the VM contexts0-7 */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
|
||||
radeon_ring_write(ring, 1 << vm->id);
|
||||
}
|
||||
|
||||
/*
|
||||
* Power and clock gating
|
||||
*/
|
||||
|
@ -6177,80 +6077,6 @@ restart_ih:
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/**
|
||||
* si_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (SI).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int si_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_bytes, cur_size_in_bytes;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
|
||||
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_bytes = size_in_bytes;
|
||||
if (cur_size_in_bytes > 0xFFFFF)
|
||||
cur_size_in_bytes = 0xFFFFF;
|
||||
size_in_bytes -= cur_size_in_bytes;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
|
||||
radeon_ring_write(ring, dst_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, src_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
|
||||
src_offset += cur_size_in_bytes;
|
||||
dst_offset += cur_size_in_bytes;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* startup/shutdown callbacks
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,235 @@
|
|||
/*
|
||||
* Copyright 2013 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "sid.h"
|
||||
|
||||
u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
|
||||
|
||||
/**
|
||||
* si_dma_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ring: radeon_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up.
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
{
|
||||
u32 reset_mask = si_gpu_check_soft_reset(rdev);
|
||||
u32 mask;
|
||||
|
||||
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
|
||||
mask = RADEON_RESET_DMA;
|
||||
else
|
||||
mask = RADEON_RESET_DMA1;
|
||||
|
||||
if (!(reset_mask & mask)) {
|
||||
radeon_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
/* force ring activities */
|
||||
radeon_ring_force_activity(rdev, ring);
|
||||
return radeon_ring_test_lockup(rdev, ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* si_dma_vm_set_page - update the page tables using the DMA
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @ib: indirect buffer to fill with commands
|
||||
* @pe: addr of the page entry
|
||||
* @addr: dst addr to write into pe
|
||||
* @count: number of page entries to update
|
||||
* @incr: increase next addr by incr bytes
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update the page tables using the DMA (SI).
|
||||
*/
|
||||
void si_dma_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags)
|
||||
{
|
||||
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
|
||||
uint64_t value;
|
||||
unsigned ndw;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
/* for non-physically contiguous pages (system) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
|
||||
ib->ptr[ib->length_dw++] = pe;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
|
||||
if (flags & RADEON_VM_PAGE_SYSTEM) {
|
||||
value = radeon_vm_map_gart(rdev, addr);
|
||||
value &= 0xFFFFFFFFFFFFF000ULL;
|
||||
} else if (flags & RADEON_VM_PAGE_VALID) {
|
||||
value = addr;
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
addr += incr;
|
||||
value |= r600_flags;
|
||||
ib->ptr[ib->length_dw++] = value;
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
while (count) {
|
||||
ndw = count * 2;
|
||||
if (ndw > 0xFFFFE)
|
||||
ndw = 0xFFFFE;
|
||||
|
||||
if (flags & RADEON_VM_PAGE_VALID)
|
||||
value = addr;
|
||||
else
|
||||
value = 0;
|
||||
/* for physically contiguous pages (vram) */
|
||||
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
|
||||
ib->ptr[ib->length_dw++] = pe; /* dst addr */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
|
||||
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
ib->ptr[ib->length_dw++] = value; /* value */
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(value);
|
||||
ib->ptr[ib->length_dw++] = incr; /* increment size */
|
||||
ib->ptr[ib->length_dw++] = 0;
|
||||
pe += ndw * 4;
|
||||
addr += (ndw / 2) * incr;
|
||||
count -= ndw / 2;
|
||||
}
|
||||
}
|
||||
while (ib->length_dw & 0x7)
|
||||
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
|
||||
}
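For the contiguous (vram) path each PTE_PDE packet lets the hardware generate up to ndw/2 page table entries from a base value and an increment, while the system path writes every entry explicitly as two dwords. The bookkeeping of the chunking loop, as a standalone sketch with made-up example addresses and sizes:

#include <stdint.h>
#include <stdio.h>

#define MAX_NDW 0xFFFFE		/* per-packet dword limit used by the driver */

int main(void)
{
	unsigned count = 1 << 20;	/* page table entries to update */
	uint64_t pe = 0x100000;		/* example page-entry GPU address */
	uint64_t addr = 0x40000000;	/* example destination address */
	uint32_t incr = 4096;		/* bytes between successive mappings */
	unsigned packets = 0;

	while (count) {
		unsigned ndw = count * 2 > MAX_NDW ? MAX_NDW : count * 2;

		/* one PTE_PDE packet would cover ndw/2 entries here */
		pe += (uint64_t)ndw * 4;		/* each entry is 2 dwords */
		addr += (uint64_t)(ndw / 2) * incr;
		count -= ndw / 2;
		packets++;
	}
	printf("needed %u packets, next pe=0x%llx next addr=0x%llx\n",
	       packets, (unsigned long long)pe, (unsigned long long)addr);
	return 0;
}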
|
||||
|
||||
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_ring *ring = &rdev->ring[ridx];
|
||||
|
||||
if (vm == NULL)
|
||||
return;
|
||||
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
|
||||
if (vm->id < 8) {
|
||||
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
|
||||
} else {
|
||||
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
|
||||
}
|
||||
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
|
||||
|
||||
/* flush hdp cache */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
|
||||
radeon_ring_write(ring, 1);
|
||||
|
||||
/* bits 0-7 are the VM contexts0-7 */
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
|
||||
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
|
||||
radeon_ring_write(ring, 1 << vm->id);
|
||||
}
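The flush picks the page-table base register from one of two banks: VM IDs 0-7 index from VM_CONTEXT0_PAGE_TABLE_BASE_ADDR and IDs 8-15 from VM_CONTEXT8_PAGE_TABLE_BASE_ADDR, four bytes apart, and the SRBM_WRITE packet takes the register offset in dwords (hence the >> 2). A sketch of just the indexing, with placeholder register offsets rather than the real values from sid.h:

#include <stdint.h>
#include <stdio.h>

/* placeholder offsets; the real values live in sid.h */
#define FAKE_VM_CONTEXT0_BASE 0x1400
#define FAKE_VM_CONTEXT8_BASE 0x1440

static uint32_t pt_base_reg(unsigned vm_id)
{
	if (vm_id < 8)
		return FAKE_VM_CONTEXT0_BASE + (vm_id << 2);
	return FAKE_VM_CONTEXT8_BASE + ((vm_id - 8) << 2);
}

int main(void)
{
	for (unsigned id = 0; id < 16; id++)
		printf("vm %2u -> reg 0x%04x (dword index 0x%03x)\n",
		       id, pt_base_reg(id), pt_base_reg(id) >> 2);
	return 0;
}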
|
||||
|
||||
/**
|
||||
* si_copy_dma - copy pages using the DMA engine
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @src_offset: src GPU address
|
||||
* @dst_offset: dst GPU address
|
||||
* @num_gpu_pages: number of GPU pages to xfer
|
||||
* @fence: radeon fence object
|
||||
*
|
||||
* Copy GPU pages using the DMA engine (SI).
|
||||
* Used by the radeon ttm implementation to move pages if
|
||||
* registered as the asic copy callback.
|
||||
*/
|
||||
int si_copy_dma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence)
|
||||
{
|
||||
struct radeon_semaphore *sem = NULL;
|
||||
int ring_index = rdev->asic->copy.dma_ring_index;
|
||||
struct radeon_ring *ring = &rdev->ring[ring_index];
|
||||
u32 size_in_bytes, cur_size_in_bytes;
|
||||
int i, num_loops;
|
||||
int r = 0;
|
||||
|
||||
r = radeon_semaphore_create(rdev, &sem);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
|
||||
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (radeon_fence_need_sync(*fence, ring->idx)) {
|
||||
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
|
||||
ring->idx);
|
||||
radeon_fence_note_sync(*fence, ring->idx);
|
||||
} else {
|
||||
radeon_semaphore_free(rdev, &sem, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size_in_bytes = size_in_bytes;
|
||||
if (cur_size_in_bytes > 0xFFFFF)
|
||||
cur_size_in_bytes = 0xFFFFF;
|
||||
size_in_bytes -= cur_size_in_bytes;
|
||||
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
|
||||
radeon_ring_write(ring, dst_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, src_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
|
||||
src_offset += cur_size_in_bytes;
|
||||
dst_offset += cur_size_in_bytes;
|
||||
}
|
||||
|
||||
r = radeon_fence_emit(rdev, fence, ring->idx);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, *fence);
|
||||
|
||||
return r;
|
||||
}
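Unlike the r6xx/r7xx paths, the SI COPY packet counts bytes (up to 0xFFFFF per packet) rather than dwords, so the loop works directly on the byte size and each packet takes five dwords. A standalone sketch of the SI chunking for comparison (4 KB GPU pages assumed):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define GPU_PAGE_SHIFT 12		/* assumed 4 KB GPU pages */
#define SI_MAX_COPY_BYTES 0xFFFFF	/* per-packet byte limit on SI */

int main(void)
{
	unsigned num_gpu_pages = 1000;
	unsigned size_in_bytes = num_gpu_pages << GPU_PAGE_SHIFT;
	unsigned num_loops = DIV_ROUND_UP(size_in_bytes, SI_MAX_COPY_BYTES);

	/* 5 dwords per COPY packet plus 11 dwords of fence/semaphore overhead */
	printf("%u bytes -> %u packets, %u ring dwords reserved\n",
	       size_in_bytes, num_loops, num_loops * 5 + 11);
	return 0;
}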