drm/radeon: rename struct radeon_cp to radeon_ring
That naming seems to make more sense, since we not only want to run PM4 rings with it.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
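For context, a rough sketch of the structure being renamed — not the exact kernel definition, just the fields the accessors in the diff below touch (ring, ring_size, ptr_mask, align_mask, ring_free_dw, rptr, wptr, rptr_reg, gpu_addr, ring_obj, ready); the exact layout and field order here are assumptions:

    /* Illustrative sketch only -- not the verbatim kernel definition.
     * The diff renames struct radeon_cp to struct radeon_ring and the
     * rdev->cp[] array to rdev->ring[], since the same object now backs
     * compute rings (e.g. Cayman CP1/CP2), not just the PM4 gfx ring.
     */
    struct radeon_ring {
            volatile uint32_t *ring;   /* CPU mapping of the ring buffer */
            unsigned ring_size;        /* size in bytes */
            unsigned ptr_mask;         /* wraps dword indices into the ring */
            unsigned align_mask;       /* commit padding alignment */
            unsigned ring_free_dw;     /* free dwords at last check */
            uint32_t rptr;             /* last read pointer seen */
            uint32_t wptr;             /* next write position */
            uint32_t rptr_reg;         /* MMIO register holding the read ptr */
            uint64_t gpu_addr;         /* GPU address of the ring buffer */
            struct radeon_bo *ring_obj;
            bool ready;
    };

    /* Callers index the per-device ring array by ring id, e.g.: */
    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];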
parent d6d2730c71
commit e32eb50dbe
@@ -1311,20 +1311,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
+	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

 	/* set to DX10/11 mode */
-	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
-	radeon_ring_write(cp, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
 	/* FIXME: implement */
-	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(cp,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 0) |
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
-	radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF);
-	radeon_ring_write(cp, ib->length_dw);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
 }

@@ -1362,73 +1362,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)

 static int evergreen_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;

-	r = radeon_ring_lock(rdev, cp, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(cp, 0x1);
-	radeon_ring_write(cp, 0x0);
-	radeon_ring_write(cp, rdev->config.evergreen.max_hw_contexts - 1);
-	radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);

 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);

-	r = radeon_ring_lock(rdev, cp, evergreen_default_size + 19);
+	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}

 	/* setup clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

 	for (i = 0; i < evergreen_default_size; i++)
-		radeon_ring_write(cp, evergreen_default_state[i]);
+		radeon_ring_write(ring, evergreen_default_state[i]);

-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

 	/* set clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);

 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(cp, 0xc0026f00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);

 	/* Clear consts */
-	radeon_ring_write(cp, 0xc0036f00);
-	radeon_ring_write(cp, 0x00000bc4);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);

-	radeon_ring_write(cp, 0xc0026900);
-	radeon_ring_write(cp, 0x00000316);
-	radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-	radeon_ring_write(cp, 0x00000010); /*  */
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */

-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);

 	return 0;
 }

 int evergreen_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;

@@ -1446,7 +1446,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	RREG32(GRBM_SOFT_RESET);

 	/* Set ring buffer size */
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;

@@ -1460,8 +1460,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	cp->wptr = 0;
-	WREG32(CP_RB_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);

 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,

@@ -1479,16 +1479,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB_CNTL, tmp);

-	WREG32(CP_RB_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

-	cp->rptr = RREG32(CP_RB_RPTR);
+	ring->rptr = RREG32(CP_RB_RPTR);

 	evergreen_cp_start(rdev);
-	cp->ready = true;
-	r = radeon_ring_test(rdev, cp);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, ring);
 	if (r) {
-		cp->ready = false;
+		ring->ready = false;
 		return r;
 	}
 	return 0;

@@ -2357,7 +2357,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	return 0;
 }

-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;

@@ -2370,19 +2370,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_unlock_commit(rdev, cp);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
-	cp->rptr = RREG32(CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, lockup, cp);
+	ring->rptr = RREG32(CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }

 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)

@@ -3056,7 +3056,7 @@ restart_ih:

 static int evergreen_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;

 	/* enable pcie gen2 link */

@@ -3120,7 +3120,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 	}
 	evergreen_irq_set(rdev);

-	r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR);
 	if (r)
 		return r;

@@ -3168,11 +3168,11 @@ int evergreen_resume(struct radeon_device *rdev)

 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
-	cp->ready = false;
+	ring->ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);

@@ -3251,8 +3251,8 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;

-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -49,7 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;

@@ -63,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;

-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
-	radeon_ring_write(cp, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, pitch);
-	radeon_ring_write(cp, slice);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, cb_color_info);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, (w - 1) | ((h - 1) << 16));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, pitch);
+	radeon_ring_write(ring, slice);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, cb_color_info);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
 }

 /* emits 5dw */

@@ -88,7 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;

 	if (size == 0xffffffff)

@@ -101,40 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		 * to the RB directly. For IBs, the CP programs this as part of the
 		 * surface_sync packet.
 		 */
-		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(cp, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(cp, 0); /* CP_COHER_CNTL2 */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
 	}
-	radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(cp, sync_type);
-	radeon_ring_write(cp, cp_coher_size);
-	radeon_ring_write(cp, mc_addr >> 8);
-	radeon_ring_write(cp, 10); /* poll interval */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
 }

 /* emits 11dw + 1 surface sync = 16dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;

 	/* VS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
-	radeon_ring_write(cp, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, 2);
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 2);
+	radeon_ring_write(ring, 0);

 	/* PS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
-	radeon_ring_write(cp, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, 1);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 2);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 1);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 2);

 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
 	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);

@@ -144,7 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;

 	/* high addr, stride */

@@ -159,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
 		SQ_VTCX_SEL_W(SQ_SEL_W);

-	radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 8));
-	radeon_ring_write(cp, 0x580);
-	radeon_ring_write(cp, gpu_addr & 0xffffffff);
-	radeon_ring_write(cp, 48 - 1); /* size */
-	radeon_ring_write(cp, sq_vtx_constant_word2);
-	radeon_ring_write(cp, sq_vtx_constant_word3);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0x580);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1); /* size */
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, sq_vtx_constant_word3);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));

 	if ((rdev->family == CHIP_CEDAR) ||
 	    (rdev->family == CHIP_PALM) ||

@@ -189,7 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;

@@ -213,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
 	cp_set_surface_sync(rdev,
 			    PACKET3_TC_ACTION_ENA, size, gpu_addr);

-	radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 8));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, sq_tex_resource_word0);
-	radeon_ring_write(cp, sq_tex_resource_word1);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, sq_tex_resource_word4);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, sq_tex_resource_word7);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word7);
 }

 /* emits 12 */

@@ -230,7 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	/* workaround some hw bugs */
 	if (x2 == 0)
 		x1 = 1;

@@ -241,44 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 		x2 = 2;
 	}

-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
-	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(cp, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(cp, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }

 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(cp, DI_PT_RECTLIST);
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);

-	radeon_ring_write(cp, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(cp,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 2) |
 #endif
 			  DI_INDEX_SIZE_16_BIT);

-	radeon_ring_write(cp, PACKET3(PACKET3_NUM_INSTANCES, 0));
-	radeon_ring_write(cp, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);

-	radeon_ring_write(cp, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-	radeon_ring_write(cp, 3);
-	radeon_ring_write(cp, DI_SRC_SEL_AUTO_INDEX);
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);

 }

@@ -286,7 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;

@@ -300,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
 	int dwords;

 	/* set clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);

 	if (rdev->family < CHIP_CAYMAN) {
 		switch (rdev->family) {

@@ -558,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
 					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));

 		/* disable dyn gprs */
-		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(cp, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(cp, 0);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0);

 		/* setup LDS */
-		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(cp, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(cp, 0x10001000);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0x10001000);

 		/* SQ config */
-		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 11));
-		radeon_ring_write(cp, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(cp, sq_config);
-		radeon_ring_write(cp, sq_gpr_resource_mgmt_1);
-		radeon_ring_write(cp, sq_gpr_resource_mgmt_2);
-		radeon_ring_write(cp, sq_gpr_resource_mgmt_3);
-		radeon_ring_write(cp, 0);
-		radeon_ring_write(cp, 0);
-		radeon_ring_write(cp, sq_thread_resource_mgmt);
-		radeon_ring_write(cp, sq_thread_resource_mgmt_2);
-		radeon_ring_write(cp, sq_stack_resource_mgmt_1);
-		radeon_ring_write(cp, sq_stack_resource_mgmt_2);
-		radeon_ring_write(cp, sq_stack_resource_mgmt_3);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, sq_config);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, sq_thread_resource_mgmt);
+		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
 	}

 	/* CONTEXT_CONTROL */
-	radeon_ring_write(cp, 0xc0012800);
-	radeon_ring_write(cp, 0x80000000);
-	radeon_ring_write(cp, 0x80000000);
+	radeon_ring_write(ring, 0xc0012800);
+	radeon_ring_write(ring, 0x80000000);
+	radeon_ring_write(ring, 0x80000000);

 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(cp, 0xc0026f00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);

 	/* SET_SAMPLER */
-	radeon_ring_write(cp, 0xc0036e00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000012);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0036e00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000012);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);

 	/* set to DX10/11 mode */
-	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
-	radeon_ring_write(cp, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);

 	/* emit an IB pointing at default state */
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(cp, gpu_addr & 0xFFFFFFFC);
-	radeon_ring_write(cp, upper_32_bits(gpu_addr) & 0xFF);
-	radeon_ring_write(cp, dwords);
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);

 }
@@ -1049,64 +1049,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)

 static int cayman_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;

-	r = radeon_ring_lock(rdev, cp, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(cp, 0x1);
-	radeon_ring_write(cp, 0x0);
-	radeon_ring_write(cp, rdev->config.cayman.max_hw_contexts - 1);
-	radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);

 	cayman_cp_enable(rdev, true);

-	r = radeon_ring_lock(rdev, cp, cayman_default_size + 19);
+	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}

 	/* setup clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

 	for (i = 0; i < cayman_default_size; i++)
-		radeon_ring_write(cp, cayman_default_state[i]);
+		radeon_ring_write(ring, cayman_default_state[i]);

-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

 	/* set clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);

 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(cp, 0xc0026f00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);

 	/* Clear consts */
-	radeon_ring_write(cp, 0xc0036f00);
-	radeon_ring_write(cp, 0x00000bc4);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);

-	radeon_ring_write(cp, 0xc0026900);
-	radeon_ring_write(cp, 0x00000316);
-	radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-	radeon_ring_write(cp, 0x00000010); /*  */
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */

-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);

 	/* XXX init other rings */

@@ -1116,12 +1116,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }

 int cayman_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp;
+	struct radeon_ring *ring;
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;

@@ -1147,8 +1147,8 @@ int cayman_cp_resume(struct radeon_device *rdev)

 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;

@@ -1157,8 +1157,8 @@ int cayman_cp_resume(struct radeon_device *rdev)

 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-	cp->wptr = 0;
-	WREG32(CP_RB0_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);

 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);

@@ -1175,14 +1175,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB0_CNTL, tmp);

-	WREG32(CP_RB0_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

-	cp->rptr = RREG32(CP_RB0_RPTR);
+	ring->rptr = RREG32(CP_RB0_RPTR);

 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;

@@ -1191,8 +1191,8 @@ int cayman_cp_resume(struct radeon_device *rdev)

 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-	cp->wptr = 0;
-	WREG32(CP_RB1_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);

 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);

@@ -1201,14 +1201,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB1_CNTL, tmp);

-	WREG32(CP_RB1_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

-	cp->rptr = RREG32(CP_RB1_RPTR);
+	ring->rptr = RREG32(CP_RB1_RPTR);

 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;

@@ -1217,8 +1217,8 @@ int cayman_cp_resume(struct radeon_device *rdev)

 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-	cp->wptr = 0;
-	WREG32(CP_RB2_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);

 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);

@@ -1227,28 +1227,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB2_CNTL, tmp);

-	WREG32(CP_RB2_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

-	cp->rptr = RREG32(CP_RB2_RPTR);
+	ring->rptr = RREG32(CP_RB2_RPTR);

 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
-	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
+	r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
-		rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-		rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}

 	return 0;
 }

-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;

@@ -1261,20 +1261,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_unlock_commit(rdev, cp);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
 	/* XXX deal with CP0,1,2 */
-	cp->rptr = RREG32(cp->rptr_reg);
-	return r100_gpu_cp_is_lockup(rdev, lockup, cp);
+	ring->rptr = RREG32(ring->rptr_reg);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }

 static int cayman_gpu_soft_reset(struct radeon_device *rdev)

@@ -1343,7 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)

 static int cayman_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;

 	/* enable pcie gen2 link */

@@ -1393,7 +1393,7 @@ static int cayman_startup(struct radeon_device *rdev)
 	}
 	evergreen_irq_set(rdev);

-	r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR);
 	if (r)
 		return r;

@@ -1438,7 +1438,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	cayman_cp_enable(rdev, false);
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);

@@ -1455,7 +1455,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;

 	/* This don't do much */

@@ -1508,8 +1508,8 @@ int cayman_init(struct radeon_device *rdev)
 	if (r)
 		return r;

-	cp->ring_obj = NULL;
-	r600_ring_init(rdev, cp, 1024 * 1024);
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);

 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -811,31 +811,31 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp[fence->ring];
+	struct radeon_ring *ring = &rdev->ring[fence->ring];

 	/* We have to make sure that caches are flushed before
 	 * CPU might read something from VRAM. */
-	radeon_ring_write(cp, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
-	radeon_ring_write(cp, RADEON_RB3D_DC_FLUSH_ALL);
-	radeon_ring_write(cp, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
-	radeon_ring_write(cp, RADEON_RB3D_ZC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
 	/* Wait until IDLE & CLEAN */
-	radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(cp, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(cp, rdev->config.r100.hdp_cntl |
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
 				RADEON_HDP_READ_BUFFER_INVALIDATE);
-	radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(cp, rdev->config.r100.hdp_cntl);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
-	radeon_ring_write(cp, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
-	radeon_ring_write(cp, fence->seq);
-	radeon_ring_write(cp, PACKET0(RADEON_GEN_INT_STATUS, 0));
-	radeon_ring_write(cp, RADEON_SW_INT_FIRE);
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }

 void r100_semaphore_ring_emit(struct radeon_device *rdev,
-			      struct radeon_cp *cp,
+			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
 {

@@ -849,7 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t cur_pages;
 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;

@@ -867,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,

 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
-	r = radeon_ring_lock(rdev, cp, ndw);
+	r = radeon_ring_lock(rdev, ring, ndw);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;

@@ -881,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,

 		/* pages are in Y direction - height
 		   page width in X direction - width */
-		radeon_ring_write(cp, PACKET3(PACKET3_BITBLT_MULTI, 8));
-		radeon_ring_write(cp,
+		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+		radeon_ring_write(ring,
 				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
 				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
 				  RADEON_GMC_SRC_CLIPPING |

@@ -894,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
 				  RADEON_DP_SRC_SOURCE_MEMORY |
 				  RADEON_GMC_CLR_CMP_CNTL_DIS |
 				  RADEON_GMC_WR_MSK_DIS);
-		radeon_ring_write(cp, (pitch << 22) | (src_offset >> 10));
-		radeon_ring_write(cp, (pitch << 22) | (dst_offset >> 10));
-		radeon_ring_write(cp, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(cp, 0);
-		radeon_ring_write(cp, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(cp, num_gpu_pages);
-		radeon_ring_write(cp, num_gpu_pages);
-		radeon_ring_write(cp, cur_pages | (stride_pixels << 16));
+		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
 	}
-	radeon_ring_write(cp, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
-	radeon_ring_write(cp, RADEON_RB2D_DC_FLUSH_ALL);
-	radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(cp,
+	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
 			  RADEON_WAIT_2D_IDLECLEAN |
 			  RADEON_WAIT_HOST_IDLECLEAN |
 			  RADEON_WAIT_DMA_GUI_IDLE);
 	if (fence) {
 		r = radeon_fence_emit(rdev, fence);
 	}
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);
 	return r;
 }

@@ -934,20 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)

 void r100_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;

-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (r) {
 		return;
 	}
-	radeon_ring_write(cp, PACKET0(RADEON_ISYNC_CNTL, 0));
-	radeon_ring_write(cp,
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
 			  RADEON_ISYNC_ANY2D_IDLE3D |
 			  RADEON_ISYNC_ANY3D_IDLE2D |
 			  RADEON_ISYNC_WAIT_IDLEGUI |
 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);
 }

@@ -1048,7 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)

 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned rb_bufsz;
 	unsigned rb_blksz;
 	unsigned max_fetch;

@@ -1074,7 +1074,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	rb_bufsz = drm_order(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	r100_cp_load_microcode(rdev);
-	r = radeon_ring_init(rdev, cp, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR);
 	if (r) {
 		return r;

@@ -1084,7 +1084,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	rb_blksz = 9;
 	/* cp will read 128bytes at a time (4 dwords) */
 	max_fetch = 1;
-	cp->align_mask = 16 - 1;
+	ring->align_mask = 16 - 1;
 	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
 	pre_write_timer = 64;
 	/* Force CP_RB_WPTR write if written more than one time before the

@@ -1114,13 +1114,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

 	/* Set ring address */
-	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)cp->gpu_addr);
-	WREG32(RADEON_CP_RB_BASE, cp->gpu_addr);
+	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
 	/* Force read & write ptr to 0 */
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
-	cp->wptr = 0;
-	WREG32(RADEON_CP_RB_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

 	/* set the wb address whether it's enabled or not */
 	WREG32(R_00070C_CP_RB_RPTR_ADDR,

@@ -1136,7 +1136,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)

 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
-	cp->rptr = RREG32(RADEON_CP_RB_RPTR);
+	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |

@@ -1145,12 +1145,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
 	radeon_ring_start(rdev);
-	r = radeon_ring_test(rdev, cp);
+	r = radeon_ring_test(rdev, ring);
 	if (r) {
 		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
 		return r;
 	}
-	cp->ready = true;
+	ring->ready = true;
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }

@@ -1162,7 +1162,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 	}
 	/* Disable ring */
 	r100_cp_disable(rdev);
-	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 	DRM_INFO("radeon: cp finalized\n");
 }

@@ -1170,7 +1170,7 @@ void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
 	WREG32(R_000770_SCRATCH_UMSK, 0);

@@ -2107,9 +2107,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
 	return -1;
 }

-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
 {
-	lockup->last_cp_rptr = cp->rptr;
+	lockup->last_cp_rptr = ring->rptr;
 	lockup->last_jiffies = jiffies;
 }

@@ -2134,20 +2134,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
 * false positive when CP is just gived nothing to do.
 *
 **/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
 {
 	unsigned long cjiffies, elapsed;

 	cjiffies = jiffies;
 	if (!time_after(cjiffies, lockup->last_jiffies)) {
 		/* likely a wrap around */
-		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_cp_rptr = ring->rptr;
 		lockup->last_jiffies = jiffies;
 		return false;
 	}
-	if (cp->rptr != lockup->last_cp_rptr) {
+	if (ring->rptr != lockup->last_cp_rptr) {
 		/* CP is still working no lockup */
-		lockup->last_cp_rptr = cp->rptr;
+		lockup->last_cp_rptr = ring->rptr;
 		lockup->last_jiffies = jiffies;
 		return false;
 	}

@@ -2160,26 +2160,26 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
 	return false;
 }

-bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 rbbm_status;
 	int r;

 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-		r100_gpu_lockup_update(&rdev->config.r100.lockup, cp);
+		r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_unlock_commit(rdev, cp);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
-	cp->rptr = RREG32(cp->rptr_reg);
-	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, cp);
+	ring->rptr = RREG32(ring->rptr_reg);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
 }

 void r100_bm_disable(struct radeon_device *rdev)

@@ -2587,22 +2587,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t rdp, wdp;
 	unsigned count, i, j;

-	radeon_ring_free_size(rdev, cp);
+	radeon_ring_free_size(rdev, ring);
 	rdp = RREG32(RADEON_CP_RB_RPTR);
 	wdp = RREG32(RADEON_CP_RB_WPTR);
-	count = (rdp + cp->ring_size - wdp) & cp->ptr_mask;
+	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
-	seq_printf(m, "%u free dwords in ring\n", cp->ring_free_dw);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
 	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & cp->ptr_mask;
-		seq_printf(m, "r[%04d]=0x%08x\n", i, cp->ring[i]);
+		i = (rdp + j) & ring->ptr_mask;
+		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
 	}
 	return 0;
 }

@@ -3644,7 +3644,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 	}
 }

-int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;

@@ -3657,15 +3657,15 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
-	radeon_ring_write(cp, PACKET0(scratch, 0));
-	radeon_ring_write(cp, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_write(ring, PACKET0(scratch, 0));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF) {

@@ -3686,11 +3686,11 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)

 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

-	radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1));
-	radeon_ring_write(cp, ib->gpu_addr);
-	radeon_ring_write(cp, ib->length_dw);
+	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+	radeon_ring_write(ring, ib->gpu_addr);
+	radeon_ring_write(ring, ib->length_dw);
 }

 int r100_ib_test(struct radeon_device *rdev)

@@ -3778,7 +3778,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
 	/* Shutdown CP we shouldn't need to do that but better be safe than
 	 * sorry
 	 */
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(R_000740_CP_CSQ_CNTL, 0);

 	/* Save few CRTC registers */
@ -87,7 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
|
|||
unsigned num_gpu_pages,
|
||||
struct radeon_fence *fence)
|
||||
{
|
||||
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
|
||||
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
|
||||
uint32_t size;
|
||||
uint32_t cur_size;
|
||||
int i, num_loops;
|
||||
|
@ -96,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
|
|||
/* radeon pitch is /64 */
|
||||
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
|
||||
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
|
||||
r = radeon_ring_lock(rdev, cp, num_loops * 4 + 64);
|
||||
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: moving bo (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
/* Must wait for 2D idle & clean before DMA or hangs might happen */
|
||||
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(cp, (1 << 16));
|
||||
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(ring, (1 << 16));
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
cur_size = size;
|
||||
if (cur_size > 0x1FFFFF) {
|
||||
cur_size = 0x1FFFFF;
|
||||
}
|
||||
size -= cur_size;
|
||||
radeon_ring_write(cp, PACKET0(0x720, 2));
|
||||
radeon_ring_write(cp, src_offset);
|
||||
radeon_ring_write(cp, dst_offset);
|
||||
radeon_ring_write(cp, cur_size | (1 << 31) | (1 << 30));
|
||||
radeon_ring_write(ring, PACKET0(0x720, 2));
|
||||
radeon_ring_write(ring, src_offset);
|
||||
radeon_ring_write(ring, dst_offset);
|
||||
radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
|
||||
src_offset += cur_size;
|
||||
dst_offset += cur_size;
|
||||
}
|
||||
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(cp, RADEON_WAIT_DMA_GUI_IDLE);
|
||||
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
|
||||
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
|
||||
if (fence) {
|
||||
r = radeon_fence_emit(rdev, fence);
|
||||
}
|
||||
radeon_ring_unlock_commit(rdev, cp);
|
||||
radeon_ring_unlock_commit(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
|
|
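r200_copy_dma() above splits one copy into packets of at most 0x1FFFFF bytes and reserves worst-case ring space (num_loops * 4 + 64 dwords) before writing anything. A sketch of just the chunking arithmetic (stand-alone C; the flag bits (1 << 31) | (1 << 30) from the real packet are omitted):

    #include <stdio.h>
    #include <stdint.h>

    #define CHUNK_MAX 0x1FFFFFu  /* per-packet byte limit, as in r200_copy_dma() */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint64_t src = 0x100000, dst = 0x900000;
        uint32_t size = 5u << 20;               /* 5 MiB to copy */
        unsigned i, num_loops = DIV_ROUND_UP(size, CHUNK_MAX);

        for (i = 0; i < num_loops; i++) {
            uint32_t cur = size > CHUNK_MAX ? CHUNK_MAX : size;
            printf("packet %u: 0x%06X bytes 0x%llX -> 0x%llX\n", i, cur,
                   (unsigned long long)src, (unsigned long long)dst);
            size -= cur;
            src += cur;
            dst += cur;
        }
        return 0;
    }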
@@ -175,40 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_cp *cp = &rdev->cp[fence->ring];
struct radeon_ring *ring = &rdev->ring[fence->ring];

/* Who ever call radeon_fence_emit should call ring_lock and ask
* for enough space (today caller are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
radeon_ring_write(cp, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(ring, 0);
/* Flush 3D cache */
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FLUSH);
radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_ZC_FLUSH);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp, (RADEON_WAIT_3D_IDLECLEAN |
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(cp, rdev->config.r300.hdp_cntl |
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(cp, rdev->config.r300.hdp_cntl);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(cp, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(cp, fence->seq);
radeon_ring_write(cp, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(cp, RADEON_SW_INT_FIRE);
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned gb_tile_config;
int r;

@@ -230,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
break;
}

r = radeon_ring_lock(rdev, cp, 64);
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
radeon_ring_write(cp, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(cp, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(cp, gb_tile_config);
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(ring, gb_tile_config);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(cp, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(cp, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(cp, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(cp, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(cp, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(ring,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |

@@ -276,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
radeon_ring_write(cp, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(ring,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |

@@ -285,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
radeon_ring_write(cp, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(cp, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(cp, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(ring,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
radeon_ring_write(cp, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_unlock_commit(rdev, ring);
}

void r300_errata(struct radeon_device *rdev)

@@ -378,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}

bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;

rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, cp);
r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, cp, 2);
r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(cp, 0x80000000);
radeon_ring_write(cp, 0x80000000);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_write(ring, 0x80000000);
radeon_ring_write(ring, 0x80000000);
radeon_ring_unlock_commit(rdev, ring);
}
cp->rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, cp);
ring->rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
}

int r300_asic_reset(struct radeon_device *rdev)
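r300_gpu_is_lockup() above (like the r600 variant later in this diff) uses one heuristic: if the GPU no longer reports GUI activity, just refresh the tracker; otherwise push two PACKET2 NOPs through the CP, re-sample the read pointer, and let r100_gpu_cp_is_lockup() decide whether rptr has moved since the last sample. A compressed user-space model of that progress check (the struct and timeout handling are simplified assumptions, not the driver's exact bookkeeping):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdint.h>

    struct lockup_tracker {
        uint32_t last_rptr;    /* rptr at the previous sample */
        long     last_time;    /* when it was sampled */
    };

    /* Model of r100_gpu_cp_is_lockup(): rptr progress means the CP is alive. */
    static bool cp_is_lockup(struct lockup_tracker *t, uint32_t rptr,
                             long now, long timeout)
    {
        if (rptr != t->last_rptr) {            /* CP consumed something */
            t->last_rptr = rptr;
            t->last_time = now;
            return false;
        }
        return (now - t->last_time) > timeout; /* stuck for too long? */
    }

    int main(void)
    {
        struct lockup_tracker t = { .last_rptr = 100, .last_time = 0 };
        printf("%d\n", cp_is_lockup(&t, 104, 50, 1000));   /* 0: rptr moved */
        printf("%d\n", cp_is_lockup(&t, 104, 2000, 1000)); /* 1: no progress */
        return 0;
    }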
@@ -199,7 +199,7 @@ static void r420_clock_resume(struct radeon_device *rdev)

static void r420_cp_errata_init(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.

@@ -208,24 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
radeon_ring_lock(rdev, cp, 8);
radeon_ring_write(cp, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(cp, rdev->config.r300.resync_scratch);
radeon_ring_write(cp, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
}

static void r420_cp_errata_fini(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
radeon_ring_lock(rdev, cp, 8);
radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}

@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
return 0;
}

bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;

@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(lockup, cp);
r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, cp, 2);
r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(cp, 0x80000000);
radeon_ring_write(cp, 0x80000000);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_write(ring, 0x80000000);
radeon_ring_write(ring, 0x80000000);
radeon_ring_unlock_commit(rdev, ring);
}
cp->rptr = RREG32(cp->rptr_reg);
return r100_gpu_cp_is_lockup(rdev, lockup, cp);
ring->rptr = RREG32(ring->rptr_reg);
return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}

int r600_asic_reset(struct radeon_device *rdev)

@@ -2144,28 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)

int r600_cp_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;

r = radeon_ring_lock(rdev, cp, 7);
r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(cp, 0x1);
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
if (rdev->family >= CHIP_RV770) {
radeon_ring_write(cp, 0x0);
radeon_ring_write(cp, rdev->config.rv770.max_hw_contexts - 1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
} else {
radeon_ring_write(cp, 0x3);
radeon_ring_write(cp, rdev->config.r600.max_hw_contexts - 1);
radeon_ring_write(ring, 0x3);
radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, 0);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);

cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);

@@ -2174,7 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)

int r600_cp_resume(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;

@@ -2186,7 +2186,7 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);

/* Set ring buffer size */
rb_bufsz = drm_order(cp->ring_size / 8);
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;

@@ -2200,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
cp->wptr = 0;
WREG32(CP_RB_WPTR, cp->wptr);
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);

/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,

@@ -2219,36 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);

WREG32(CP_RB_BASE, cp->gpu_addr >> 8);
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

cp->rptr = RREG32(CP_RB_RPTR);
ring->rptr = RREG32(CP_RB_RPTR);

r600_cp_start(rdev);
cp->ready = true;
r = radeon_ring_test(rdev, cp);
ring->ready = true;
r = radeon_ring_test(rdev, ring);
if (r) {
cp->ready = false;
ring->ready = false;
return r;
}
return 0;
}

void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size)
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;

/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
cp->ring_size = ring_size;
cp->align_mask = 16 - 1;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
}

void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
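r600_ring_init() above rounds the requested size up to a power of two: drm_order() returns the smallest n with (1 << n) >= x, so (1 << (rb_bufsz + 1)) * 4 is the aligned byte size, and rb_bufsz itself stays programmable into CP_RB_CNTL by r600_cp_resume(). A stand-alone model of that transform (order_base_2 below is a naive re-implementation of what drm_order() computes):

    #include <stdio.h>

    /* smallest n such that (1 << n) >= size -- models drm_order() */
    static unsigned order_base_2(unsigned long size)
    {
        unsigned n = 0;
        while ((1UL << n) < size)
            n++;
        return n;
    }

    int main(void)
    {
        unsigned long ring_size = 1000000;      /* arbitrary requested byte count */
        unsigned rb_bufsz = order_base_2(ring_size / 8);

        ring_size = (1UL << (rb_bufsz + 1)) * 4;
        printf("rb_bufsz=%u aligned ring_size=%lu\n", rb_bufsz, ring_size);
        return 0;                               /* rb_bufsz=17, ring_size=1048576 */
    }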
@@ -2267,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
}
}

int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
unsigned i, ridx = radeon_ring_index(rdev, cp);
unsigned i, ridx = radeon_ring_index(rdev, ring);
int r;

r = radeon_scratch_get(rdev, &scratch);

@@ -2280,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, cp, 3);
r = radeon_ring_lock(rdev, ring, 3);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(cp, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(cp, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)

@@ -2310,62 +2310,62 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_cp *cp = &rdev->cp[fence->ring];
struct radeon_ring *ring = &rdev->ring[fence->ring];

if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
(u64)(rdev->fence_drv[fence->ring].scratch_reg - rdev->scratch.reg_base);
/* flush read cache over gart */
radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(cp, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(cp, 0xFFFFFFFF);
radeon_ring_write(cp, 0);
radeon_ring_write(cp, 10); /* poll interval */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(cp, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(cp, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(cp, addr & 0xffffffff);
radeon_ring_write(cp, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(cp, fence->seq);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
} else {
/* flush read cache over gart */
radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(cp, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(cp, 0xFFFFFFFF);
radeon_ring_write(cp, 0);
radeon_ring_write(cp, 10); /* poll interval */
radeon_ring_write(cp, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(cp, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(cp, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(cp, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(cp, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(cp, fence->seq);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(cp, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(cp, RB_INT_STAT);
radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(ring, RB_INT_STAT);
}
}

void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_cp *cp,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

radeon_ring_write(cp, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(cp, addr & 0xffffffff);
radeon_ring_write(cp, (upper_32_bits(addr) & 0xff) | sel);
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}

int r600_copy_blit(struct radeon_device *rdev,
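The MEM_SEMAPHORE packet above carries a 40-bit GPU address in two dwords: the low 32 bits, then bits 39:32 packed together with the wait/signal select. A tiny model of that split (the sel value below is a placeholder; the real PACKET3_SEM_SEL_* constants are not restated here):

    #include <stdio.h>
    #include <stdint.h>

    #define upper_32_bits(x) ((uint32_t)((x) >> 32))

    int main(void)
    {
        uint64_t addr = 0x0000008F12345678ull;  /* 40-bit GPU address */
        uint32_t sel  = 0x7u << 29;             /* placeholder select bits */

        uint32_t dw1 = (uint32_t)(addr & 0xffffffff);       /* low dword */
        uint32_t dw2 = (upper_32_bits(addr) & 0xff) | sel;  /* bits 39:32 + sel */

        printf("dw1=0x%08X dw2=0x%08X\n", dw1, dw2);
        return 0;
    }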
@@ -2420,7 +2420,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)

int r600_startup(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;

/* enable pcie gen2 link */

@@ -2468,7 +2468,7 @@ int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);

r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR);

if (r)

@@ -2534,7 +2534,7 @@ int r600_suspend(struct radeon_device *rdev)
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);

@@ -2609,8 +2609,8 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;

rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);

@@ -2677,17 +2677,17 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

/* FIXME: implement */
radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(cp, ib->length_dw);
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev, int ring)

@@ -3518,22 +3518,22 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned count, i, j;

radeon_ring_free_size(rdev, cp);
count = (cp->ring_size / 4) - cp->ring_free_dw;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", cp->wptr);
seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", cp->rptr);
seq_printf(m, "%u free dwords in ring\n", cp->ring_free_dw);
seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", ring->wptr);
seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
i = cp->rptr;
i = ring->rptr;
for (j = 0; j <= count; j++) {
seq_printf(m, "r[%04d]=0x%08x\n", i, cp->ring[i]);
i = (i + 1) & cp->ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
}

@@ -50,7 +50,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;

@@ -64,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, gpu_addr >> 8);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);

if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
radeon_ring_write(cp, 2 << 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
radeon_ring_write(ring, 2 << 0);
}

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, (pitch << 0) | (slice << 10));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (pitch << 0) | (slice << 10));

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, cb_color_info);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, cb_color_info);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
}

/* emits 5dw */
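All the (CB_COLOR0_* - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2 expressions above follow one rule: SET_*_REG packets address registers by dword index relative to the block base, not by byte address. A worked example with hypothetical addresses (both constants below are made up purely for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t ctx_base = 0x28000;  /* hypothetical block base */
        const uint32_t reg_addr = 0x28040;  /* hypothetical register in that block */

        /* byte offset into the block, divided by 4 to get a dword index */
        uint32_t index = (reg_addr - ctx_base) >> 2;
        printf("register dword index: %u\n", index); /* prints 16 */
        return 0;
    }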
@@ -104,7 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;

if (size == 0xffffffff)

@@ -112,18 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);

radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(cp, sync_type);
radeon_ring_write(cp, cp_coher_size);
radeon_ring_write(cp, mc_addr >> 8);
radeon_ring_write(cp, 10); /* poll interval */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, sync_type);
radeon_ring_write(ring, cp_coher_size);
radeon_ring_write(ring, mc_addr >> 8);
radeon_ring_write(ring, 10); /* poll interval */
}

/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;

@@ -132,35 +132,35 @@ set_shaders(struct radeon_device *rdev)

/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, gpu_addr >> 8);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, sq_pgm_resources);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_pgm_resources);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);

/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, gpu_addr >> 8);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, sq_pgm_resources | (1 << 28));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_pgm_resources | (1 << 28));

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 2);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 2);

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(cp, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);

gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);

@@ -170,7 +170,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;

sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |

@@ -179,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif

radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(cp, 0x460);
radeon_ring_write(cp, gpu_addr & 0xffffffff);
radeon_ring_write(cp, 48 - 1);
radeon_ring_write(cp, sq_vtx_constant_word2);
radeon_ring_write(cp, 1 << 0);
radeon_ring_write(cp, 0);
radeon_ring_write(cp, 0);
radeon_ring_write(cp, SQ_TEX_VTX_VALID_BUFFER << 30);
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(ring, 0x460);
radeon_ring_write(ring, gpu_addr & 0xffffffff);
radeon_ring_write(ring, 48 - 1);
radeon_ring_write(ring, sq_vtx_constant_word2);
radeon_ring_write(ring, 1 << 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);

if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||

@@ -207,7 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

if (h < 1)

@@ -230,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);

radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, sq_tex_resource_word0);
radeon_ring_write(cp, sq_tex_resource_word1);
radeon_ring_write(cp, gpu_addr >> 8);
radeon_ring_write(cp, gpu_addr >> 8);
radeon_ring_write(cp, sq_tex_resource_word4);
radeon_ring_write(cp, 0);
radeon_ring_write(cp, SQ_TEX_VTX_VALID_TEXTURE << 30);
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word0);
radeon_ring_write(ring, sq_tex_resource_word1);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, sq_tex_resource_word4);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
}

/* emits 12 */

@@ -246,45 +246,45 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(cp, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(cp, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}

/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(cp, DI_PT_RECTLIST);
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, DI_PT_RECTLIST);

radeon_ring_write(cp, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);

radeon_ring_write(cp, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(cp, 1);
radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(ring, 1);

radeon_ring_write(cp, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(cp, 3);
radeon_ring_write(cp, DI_SRC_SEL_AUTO_INDEX);
radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(ring, 3);
radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);

}

@@ -292,7 +292,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;

@@ -448,24 +448,24 @@ set_default_state(struct radeon_device *rdev)
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
radeon_ring_write(cp, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(cp, dwords);
radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(ring, dwords);

/* SQ config */
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(cp, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(cp, sq_config);
radeon_ring_write(cp, sq_gpr_resource_mgmt_1);
radeon_ring_write(cp, sq_gpr_resource_mgmt_2);
radeon_ring_write(cp, sq_thread_resource_mgmt);
radeon_ring_write(cp, sq_stack_resource_mgmt_1);
radeon_ring_write(cp, sq_stack_resource_mgmt_2);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_config);
radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
radeon_ring_write(ring, sq_thread_resource_mgmt);
radeon_ring_write(ring, sq_stack_resource_mgmt_1);
radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}

static uint32_t i2f(uint32_t input)

@@ -687,7 +687,7 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,

int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size;
int num_loops = 0;

@@ -708,7 +708,7 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, cp, ring_size);
r = radeon_ring_lock(rdev, ring, ring_size);
if (r)
return r;

@@ -727,7 +727,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
if (fence)
r = radeon_fence_emit(rdev, fence);

radeon_ring_unlock_commit(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}

void r600_kms_blit_copy(struct radeon_device *rdev,
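r600_blit_prepare_copy() above budgets the whole submission before touching the ring: a fixed common part plus a per-loop cost, all reserved by one radeon_ring_lock() so that every radeon_ring_write() until the unlock_commit in r600_blit_done_copy() is guaranteed to fit. A sketch of the budget arithmetic (the dword counts are invented for illustration, not the driver's real constants):

    #include <stdio.h>

    int main(void)
    {
        unsigned dwords_per_loop = 76;   /* assumed per-rectangle packet cost */
        unsigned ring_size_common = 40;  /* assumed one-time state setup cost */
        unsigned num_loops = 5;

        /* reserve the worst case up front, as r600_blit_prepare_copy() does */
        unsigned ring_size = num_loops * dwords_per_loop + ring_size_common;
        printf("dwords to reserve: %u\n", ring_size); /* prints 420 */
        return 0;
    }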
@@ -231,7 +231,7 @@ int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Semaphores.
*/
struct radeon_cp;
struct radeon_ring;

struct radeon_semaphore_driver {
rwlock_t lock;

@@ -485,7 +485,7 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);

/*
* CP & ring.
* CP & rings.
*/

/* max number of rings */

@@ -522,7 +522,7 @@ struct radeon_ib_pool {
unsigned head_id;
};

struct radeon_cp {
struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;

@@ -600,17 +600,17 @@ void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size,
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);


/*

@@ -915,8 +915,8 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
*/
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_cp *cpA,
struct radeon_cp *cpB);
struct radeon_ring *cpA,
struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);

@@ -943,7 +943,7 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_cp *cp);
bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);

@@ -955,11 +955,11 @@ struct radeon_asic {
struct {
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_cp *cp,
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
} ring[RADEON_NUM_RINGS];

int (*ring_test)(struct radeon_device *rdev, struct radeon_cp *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);

@@ -1293,7 +1293,7 @@ struct radeon_device {
rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
struct radeon_semaphore_driver semaphore_drv;
struct radeon_cp cp[RADEON_NUM_RINGS];
struct radeon_ring ring[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;

@@ -1476,16 +1476,16 @@ void radeon_atombios_fini(struct radeon_device *rdev);
* RING helpers.
*/
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
cp->ring[cp->wptr++] = v;
cp->wptr &= cp->ptr_mask;
cp->count_dw--;
cp->ring_free_dw--;
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_cp *cp, uint32_t v);
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif

/*
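The renamed inline above is the heart of the whole API: every radeon_ring_write() in this diff ends up in that four-line body, with the power-of-two ptr_mask making wraparound a single AND. A stand-alone model with a tiny eight-dword ring (the struct is trimmed to the fields the helper touches; it is not the full driver type):

    #include <stdio.h>
    #include <stdint.h>

    struct ring_model {
        uint32_t buf[8];     /* tiny power-of-two ring for demonstration */
        unsigned wptr;
        unsigned ptr_mask;   /* number of dwords - 1 */
        int count_dw;        /* dwords still reserved by the caller */
        int ring_free_dw;    /* dwords free in the whole ring */
    };

    static void ring_write(struct ring_model *ring, uint32_t v)
    {
        ring->buf[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;  /* cheap wraparound: size is 2^n */
        ring->count_dw--;
        ring->ring_free_dw--;
    }

    int main(void)
    {
        struct ring_model r = { .wptr = 6, .ptr_mask = 7,
                                .count_dw = 4, .ring_free_dw = 8 };

        ring_write(&r, 0x80000000);  /* PACKET2 NOP, as in the lockup paths */
        ring_write(&r, 0x80000000);  /* fills slot 7; wptr wraps to 0 */
        ring_write(&r, 0xDEADBEEF);  /* lands in slot 0 */
        printf("wptr=%u free=%d\n", r.wptr, r.ring_free_dw); /* wptr=1 free=5 */
        return 0;
    }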
@ -58,7 +58,7 @@ void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);

@ -69,7 +69,7 @@ int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_cp *cp,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);

@ -86,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp);
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);

@ -104,10 +104,10 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
struct radeon_ring *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);

@ -157,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,

@ -303,10 +303,10 @@ int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_cp *cp,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,

@ -314,7 +314,7 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev, int ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence *fence);

@ -334,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size);
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);

@ -403,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);

@ -434,7 +434,7 @@ int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp);
bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);

#endif
@ -719,7 +719,7 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
for (i = 0; i < RADEON_NUM_RINGS; ++i)
mutex_init(&rdev->cp[i].mutex);
mutex_init(&rdev->ring[i].mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);

@ -84,7 +84,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
return 0;
}
fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
if (!rdev->cp[fence->ring].ready)
if (!rdev->ring[fence->ring].ready)
/* FIXME: cp is not running assume everythings is done right
* away
*/

@ -269,7 +269,7 @@ retry:
* if we experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv[fence->ring].last_seq &&
radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
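The retry path above only suspects a lockup when two conditions hold: the fence sequence number read back from the GPU has not moved since the last check, and the per-ASIC lockup test agrees. A standalone toy of that two-step heuristic (hypothetical names, purely illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy version of the check in the hunk above: a stalled sequence
 * number alone is not proof, so a second opinion is consulted. */
static bool asic_says_locked_up(void) { return true; }  /* stand-in */

static bool suspect_lockup(uint32_t seq_now, uint32_t seq_last)
{
	/* sequence still moving -> GPU is alive, just slow */
	if (seq_now != seq_last)
		return false;
	/* unchanged sequence: ask the ASIC-specific test to confirm */
	return asic_says_locked_up();
}

int main(void)
{
	printf("progress:  %d\n", suspect_lockup(42, 41)); /* prints 0 */
	printf("no change: %d\n", suspect_lockup(42, 42)); /* prints 1 */
	return 0;
}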
@ -163,7 +163,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
for(i = 0; i < RADEON_NUM_RINGS; ++i)
args->gart_size -= rdev->cp[i].ring_size;
args->gart_size -= rdev->ring[i].ring_size;
return 0;
}

@ -253,8 +253,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (rdev->cp[i].ring_obj)
mutex_lock(&rdev->cp[i].mutex);
if (rdev->ring[i].ring_obj)
mutex_lock(&rdev->ring[i].mutex);
}

/* gui idle int has issues on older chips it seems */

@ -271,13 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
} else {
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
if (cp->ready) {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
if (ring->ready) {
struct radeon_fence *fence;
radeon_ring_alloc(rdev, cp, 64);
radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
radeon_ring_alloc(rdev, ring, 64);
radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
radeon_fence_emit(rdev, fence);
radeon_ring_commit(rdev, cp);
radeon_ring_commit(rdev, ring);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
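The else-branch above drains the GFX ring before a reclock: it reserves space, emits a fence, kicks the ring, and blocks until the fence signals, which guarantees every previously queued command has retired. Condensed, the idle-by-fence pattern reads like this (a kernel-context sketch of exactly the calls in the hunk above, not a standalone program; error handling omitted):

/* Idle a ring by fencing its current tail and waiting (sketch). */
struct radeon_fence *fence;

radeon_ring_alloc(rdev, ring, 64);                 /* room for the fence packet */
radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
radeon_fence_emit(rdev, fence);                    /* fence lands after all prior work */
radeon_ring_commit(rdev, ring);                    /* bump wptr so the GPU sees it */
radeon_fence_wait(fence, false);                   /* returns once the GPU got there */
radeon_fence_unref(&fence);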
@ -312,8 +312,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (rdev->cp[i].ring_obj)
mutex_unlock(&rdev->cp[i].mutex);
if (rdev->ring[i].ring_obj)
mutex_unlock(&rdev->ring[i].mutex);
}
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);

@ -60,17 +60,17 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}

void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (cp->count_dw <= 0) {
if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
}
#endif
cp->ring[cp->wptr++] = v;
cp->wptr &= cp->ptr_mask;
cp->count_dw--;
cp->ring_free_dw--;
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}

void radeon_ib_bogus_cleanup(struct radeon_device *rdev)

@ -178,17 +178,17 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
int r = 0;

if (!ib->length_dw || !cp->ready) {
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}

/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, cp, 64);
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;

@ -199,7 +199,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
/* once scheduled IB is considered free and protected by the fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
@ -284,150 +284,150 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* r1xx-r5xx only has CP ring */
if (rdev->family < CHIP_R600)
return RADEON_RING_TYPE_GFX_INDEX;

if (rdev->family >= CHIP_CAYMAN) {
if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
return CAYMAN_RING_TYPE_CP1_INDEX;
else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
return CAYMAN_RING_TYPE_CP2_INDEX;
}
return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
if (rdev->wb.enabled)
cp->rptr = le32_to_cpu(rdev->wb.wb[cp->rptr_offs/4]);
ring->rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
cp->rptr = RREG32(cp->rptr_reg);
ring->rptr = RREG32(ring->rptr_reg);
/* This works because ring_size is a power of 2 */
cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
cp->ring_free_dw -= cp->wptr;
cp->ring_free_dw &= cp->ptr_mask;
if (!cp->ring_free_dw) {
cp->ring_free_dw = cp->ring_size / 4;
ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
ring->ring_free_dw = ring->ring_size / 4;
}
}

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;

/* Align requested size with padding so unlock_commit can
 * pad safely */
ndw = (ndw + cp->align_mask) & ~cp->align_mask;
while (ndw > (cp->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, cp);
if (ndw < cp->ring_free_dw) {
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
if (ndw < ring->ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
cp->count_dw = ndw;
cp->wptr_old = cp->wptr;
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;

mutex_lock(&cp->mutex);
r = radeon_ring_alloc(rdev, cp, ndw);
mutex_lock(&ring->mutex);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
mutex_unlock(&cp->mutex);
mutex_unlock(&ring->mutex);
return r;
}
return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned count_dw_pad;
unsigned i;

/* We pad to match fetch size */
count_dw_pad = (cp->align_mask + 1) -
(cp->wptr & cp->align_mask);
count_dw_pad = (ring->align_mask + 1) -
(ring->wptr & ring->align_mask);
for (i = 0; i < count_dw_pad; i++) {
radeon_ring_write(cp, 2 << 30);
radeon_ring_write(ring, 2 << 30);
}
DRM_MEMORYBARRIER();
WREG32(cp->wptr_reg, cp->wptr);
(void)RREG32(cp->wptr_reg);
WREG32(ring->wptr_reg, ring->wptr);
(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, cp);
mutex_unlock(&cp->mutex);
radeon_ring_commit(rdev, ring);
mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
cp->wptr = cp->wptr_old;
mutex_unlock(&cp->mutex);
ring->wptr = ring->wptr_old;
mutex_unlock(&ring->mutex);
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size,
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg)
{
int r;

cp->ring_size = ring_size;
cp->rptr_offs = rptr_offs;
cp->rptr_reg = rptr_reg;
cp->wptr_reg = wptr_reg;
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
/* Allocate ring buffer */
if (cp->ring_obj == NULL) {
r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&cp->ring_obj);
&ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
r = radeon_bo_reserve(cp->ring_obj, false);
r = radeon_bo_reserve(ring->ring_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
&cp->gpu_addr);
r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
&ring->gpu_addr);
if (r) {
radeon_bo_unreserve(cp->ring_obj);
radeon_bo_unreserve(ring->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
r = radeon_bo_kmap(cp->ring_obj,
(void **)&cp->ring);
radeon_bo_unreserve(cp->ring_obj);
r = radeon_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
cp->ptr_mask = (cp->ring_size / 4) - 1;
cp->ring_free_dw = cp->ring_size / 4;
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
struct radeon_bo *ring_obj;

mutex_lock(&cp->mutex);
ring_obj = cp->ring_obj;
cp->ring = NULL;
cp->ring_obj = NULL;
mutex_unlock(&cp->mutex);
mutex_lock(&ring->mutex);
ring_obj = ring->ring_obj;
ring->ring = NULL;
ring->ring_obj = NULL;
mutex_unlock(&ring->mutex);

if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
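Three pieces of power-of-two arithmetic above are worth pinning down: radeon_ring_alloc rounds the request up to the fetch alignment with (ndw + mask) & ~mask, radeon_ring_free_size computes the gap between rptr and wptr modulo the ring size with a single mask, and a result of zero is treated as a completely empty ring (alloc's "ndw > ring_free_dw - 1" test then keeps one dword in reserve, so wptr == rptr never ambiguously means full). A standalone sketch with concrete numbers (hypothetical helper names):

#include <stdio.h>

/* Ring of 256 dwords -> ptr_mask = 0xff, as set up in radeon_ring_init(). */
#define RING_DW  256u
#define PTR_MASK (RING_DW - 1)

/* radeon_ring_alloc(): round ndw up to the fetch alignment. */
static unsigned round_up_dw(unsigned ndw, unsigned align_mask)
{
	return (ndw + align_mask) & ~align_mask;
}

/* radeon_ring_free_size(): distance from wptr forward to rptr,
 * modulo the ring size; 0 is remapped to "entirely free". */
static unsigned free_dw(unsigned rptr, unsigned wptr)
{
	unsigned free = (rptr + RING_DW - wptr) & PTR_MASK;
	return free ? free : RING_DW;
}

int main(void)
{
	/* 23 dwords requested, 16-dword fetch alignment -> 32 reserved */
	printf("round_up_dw(23, 15) = %u\n", round_up_dw(23, 15));
	/* writer at 250, reader at 10: 16 dwords free across the wrap */
	printf("free_dw(10, 250)    = %u\n", free_dw(10, 250));
	/* rptr == wptr is treated as an empty ring */
	printf("free_dw(10, 10)     = %u\n", free_dw(10, 10));
	return 0;
}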
@ -121,13 +121,13 @@ int radeon_semaphore_create(struct radeon_device *rdev,
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
radeon_semaphore_ring_emit(rdev, ring, &rdev->cp[ring], semaphore, false);
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}

void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
radeon_semaphore_ring_emit(rdev, ring, &rdev->cp[ring], semaphore, true);
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}

void radeon_semaphore_free(struct radeon_device *rdev,

@ -44,7 +44,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 */
n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
n -= rdev->cp[i].ring_size;
n -= rdev->ring[i].ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
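Both this test and radeon_gem_info_ioctl above budget GTT the same way: start from the total GTT size and subtract everything the driver itself parks there, namely the IB pool (RADEON_IB_POOL_SIZE buffers of 64 KiB each), every ring buffer, and, when present, the writeback page and the IH ring. A standalone sketch of that bookkeeping with made-up sizes (all values here are assumptions, not taken from any real device):

#include <stdio.h>
#include <stdint.h>

#define GTT_SIZE       (512u * 1024 * 1024)  /* assumed 512 MiB GTT aperture */
#define IB_POOL_SIZE   16u                   /* assumed 16 IBs of 64 KiB each */
#define GPU_PAGE_SIZE  4096u

int main(void)
{
	uint32_t ring_size[] = { 1024 * 1024, 64 * 1024, 64 * 1024 };
	uint64_t n = GTT_SIZE - (uint64_t)IB_POOL_SIZE * 64 * 1024;

	/* subtract every ring buffer living in GTT */
	for (unsigned i = 0; i < sizeof(ring_size) / sizeof(ring_size[0]); i++)
		n -= ring_size[i];
	n -= GPU_PAGE_SIZE;   /* writeback page */
	n -= 64 * 1024;       /* IH ring */

	printf("GTT left for the copy test: %llu bytes\n",
	       (unsigned long long)n);
	return 0;
}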
@ -236,16 +236,16 @@ out_cleanup:
}

void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_cp *cpA,
struct radeon_cp *cpB)
struct radeon_ring *ringA,
struct radeon_ring *ringB)
{
struct radeon_fence *fence = NULL;
struct radeon_semaphore *semaphore = NULL;
int ringA = radeon_ring_index(rdev, cpA);
int ringB = radeon_ring_index(rdev, cpB);
int ridxA = radeon_ring_index(rdev, ringA);
int ridxB = radeon_ring_index(rdev, ringB);
int r;

r = radeon_fence_create(rdev, &fence, ringA);
r = radeon_fence_create(rdev, &fence, ridxA);
if (r) {
DRM_ERROR("Failed to create sync fence\n");
goto out_cleanup;

@ -257,14 +257,14 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}

r = radeon_ring_lock(rdev, cpA, 64);
r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
DRM_ERROR("Failed to lock ring %d\n", ringA);
DRM_ERROR("Failed to lock ring A %d\n", ridxA);
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA, semaphore);
radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
radeon_fence_emit(rdev, fence);
radeon_ring_unlock_commit(rdev, cpA);
radeon_ring_unlock_commit(rdev, ringA);

mdelay(1000);

@ -273,13 +273,13 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}

r = radeon_ring_lock(rdev, cpB, 64);
r = radeon_ring_lock(rdev, ringB, 64);
if (r) {
DRM_ERROR("Failed to lock ring %d\n", ringB);
DRM_ERROR("Failed to lock ring B %p\n", ringB);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB, semaphore);
radeon_ring_unlock_commit(rdev, cpB);
radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
radeon_ring_unlock_commit(rdev, ringB);

r = radeon_fence_wait(fence, false);
if (r) {

@ -287,7 +287,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}

DRM_INFO("Syncing between rings %d and %d seems to work.\n", ringA, ringB);
DRM_INFO("Syncing between rings %d and %d seems to work.\n", ridxA, ridxB);

out_cleanup:
if (semaphore)
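The ordering argument in this test: ring A executes a semaphore wait before the fence, so the fence can only signal after ring B executes the matching semaphore signal; the mdelay(1000) gives A time to reach the wait first, and a successful radeon_fence_wait() therefore exercises the whole cross-ring path. The same handshake, modeled in userspace with a condition variable standing in for the GPU semaphore (an analogy only, not driver code; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  sem  = PTHREAD_COND_INITIALIZER;
static bool signaled, fenced;

/* "ring A": wait on the semaphore, then retire the fence */
static void *ring_a(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!signaled)
		pthread_cond_wait(&sem, &lock);
	fenced = true;              /* fence only signals after the wait */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t a;
	pthread_create(&a, NULL, ring_a, NULL);
	sleep(1);                   /* the mdelay(1000) analogue */

	pthread_mutex_lock(&lock);  /* "ring B": signal the semaphore */
	signaled = true;
	pthread_cond_signal(&sem);
	pthread_mutex_unlock(&lock);

	pthread_join(a, NULL);      /* the radeon_fence_wait() analogue */
	printf("fence signaled: %s\n", fenced ? "yes" : "no");
	return 0;
}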
@ -305,20 +305,20 @@ void radeon_test_syncing(struct radeon_device *rdev)
int i, j;

for (i = 1; i < RADEON_NUM_RINGS; ++i) {
struct radeon_cp *cpA = &rdev->cp[i];
if (!cpA->ready)
struct radeon_ring *ringA = &rdev->ring[i];
if (!ringA->ready)
continue;

for (j = 0; j < i; ++j) {
struct radeon_cp *cpB = &rdev->cp[j];
if (!cpB->ready)
struct radeon_ring *ringB = &rdev->ring[j];
if (!ringB->ready)
continue;

DRM_INFO("Testing syncing between rings %d and %d\n", i, j);
radeon_test_ring_sync(rdev, cpA, cpB);
radeon_test_ring_sync(rdev, ringA, ringB);

DRM_INFO("Testing syncing between rings %d and %d\n", j, i);
radeon_test_ring_sync(rdev, cpB, cpA);
radeon_test_ring_sync(rdev, ringB, ringA);
}
}
}

@ -188,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false)
if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);

@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) {
if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready) {
DRM_ERROR("Trying to move memory with CP turned off.\n");
return -EINVAL;
}

@ -380,7 +380,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
radeon_move_null(bo, new_mem);
return 0;
}
if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
/* use memcpy */
goto memcpy;
}
@ -55,45 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)

void rv515_ring_start(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;

r = radeon_ring_lock(rdev, cp, 64);
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
radeon_ring_write(cp, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(ring,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(cp, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(cp, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(cp, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(cp, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(cp, PACKET0(GB_SELECT, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(GB_ENABLE, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(cp, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(cp, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, ZC_FLUSH | ZC_FREE);
radeon_ring_write(cp, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(cp, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(cp, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(cp, 0);
radeon_ring_write(cp, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(cp, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(cp, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(cp, ZC_FLUSH | ZC_FREE);
radeon_ring_write(cp, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(ring,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |

@ -102,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
radeon_ring_write(cp, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(cp,
radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(ring,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |

@ -111,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
radeon_ring_write(cp, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(cp, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(cp, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(cp, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(cp, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(cp, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(cp, PACKET0(0x20C8, 0));
radeon_ring_write(cp, 0);
radeon_ring_unlock_commit(rdev, cp);
radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
}

int rv515_mc_wait_for_idle(struct radeon_device *rdev)
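Every dword pushed above is either a packet header or payload. A type-0 packet header selects a register block to write, which is why each PACKET0(reg, 0) is followed by exactly one value, and the "2 << 30" filler that radeon_ring_commit pads with is a bare type-2 header, a one-dword NOP. A rough sketch of that header layout (the field positions below are an assumption from memory of the radeon headers, not a quote; verify against the real macros):

#include <stdint.h>
#include <stdio.h>

/* Assumed PM4-style layout: packet type in bits 31:30, dword count
 * in bits 29:16, register dword index in bits 15:0 for type 0. */
static uint32_t packet0(uint32_t reg, uint32_t n)
{
	/* type 0: write n+1 dwords to consecutive registers from reg */
	return (0u << 30) | ((n & 0x3fff) << 16) | ((reg >> 2) & 0xffff);
}

static uint32_t packet2(void)
{
	/* type 2: header-only filler, the "2 << 30" commit padding */
	return 2u << 30;
}

int main(void)
{
	/* e.g. a single write to the register at byte offset 0x20C8 */
	printf("PACKET0(0x20C8, 0) = 0x%08x\n", packet0(0x20C8, 0));
	printf("PACKET2()          = 0x%08x\n", packet2());
	return 0;
}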
@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}

/*

@ -1043,7 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)

static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;

/* enable pcie gen2 link */

@ -1092,7 +1092,7 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);

r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR);
if (r)
return r;

@ -1144,7 +1144,7 @@ int rv770_suspend(struct radeon_device *rdev)
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);

@ -1217,8 +1217,8 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;

rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);