Merge branch 'next' of git://people.freedesktop.org/~deathsimple/linux into drm-next
This merges Christian's work that has been hanging around on the list.
commit faadaf97e5
@@ -1368,7 +1368,15 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
/* FIXME: implement */

if (ring->rptr_save_reg) {
uint32_t next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
}

radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -3087,13 +3095,11 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = r600_audio_init(rdev);
if (r) {
@@ -3137,9 +3143,6 @@ int evergreen_suspend(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
radeon_ib_pool_suspend(rdev);
r600_blit_suspend(rdev);
r700_cp_stop(rdev);
ring->ready = false;
evergreen_irq_suspend(rdev);
@@ -3225,20 +3228,14 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -3265,7 +3262,7 @@ void evergreen_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);

@@ -634,10 +634,6 @@ int evergreen_blit_init(struct radeon_device *rdev)

rdev->r600_blit.max_dim = 16384;

/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;

rdev->r600_blit.state_offset = 0;

if (rdev->family < CHIP_CAYMAN)
@@ -668,11 +664,26 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += cayman_ps_size * 4;
obj_size = ALIGN(obj_size, 256);

r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
return r;
/* pin copy shader into vram if not already initialized */
if (!rdev->r600_blit.shader_obj) {
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
return r;
}

r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
}

DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
@@ -714,17 +725,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}

@@ -855,6 +855,15 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);

if (ring->rptr_save_reg) {
uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
}

radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -981,16 +990,41 @@ static int cayman_cp_start(struct radeon_device *rdev)

static void cayman_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
cayman_cp_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
}

int cayman_cp_resume(struct radeon_device *rdev)
{
static const int ridx[] = {
RADEON_RING_TYPE_GFX_INDEX,
CAYMAN_RING_TYPE_CP1_INDEX,
CAYMAN_RING_TYPE_CP2_INDEX
};
static const unsigned cp_rb_cntl[] = {
CP_RB0_CNTL,
CP_RB1_CNTL,
CP_RB2_CNTL,
};
static const unsigned cp_rb_rptr_addr[] = {
CP_RB0_RPTR_ADDR,
CP_RB1_RPTR_ADDR,
CP_RB2_RPTR_ADDR
};
static const unsigned cp_rb_rptr_addr_hi[] = {
CP_RB0_RPTR_ADDR_HI,
CP_RB1_RPTR_ADDR_HI,
CP_RB2_RPTR_ADDR_HI
};
static const unsigned cp_rb_base[] = {
CP_RB0_BASE,
CP_RB1_BASE,
CP_RB2_BASE
};
struct radeon_ring *ring;
u32 tmp;
u32 rb_bufsz;
int r;
int i, r;

/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
@@ -1012,91 +1046,47 @@ int cayman_cp_resume(struct radeon_device *rdev)

WREG32(CP_DEBUG, (1 << 27));

/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB0_CNTL, tmp);

/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
ring->wptr = 0;
WREG32(CP_RB0_WPTR, ring->wptr);

/* set the wb address wether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
WREG32(SCRATCH_UMSK, 0xff);

if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
for (i = 0; i < 3; ++i) {
uint32_t rb_cntl;
uint64_t addr;

/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
rb_cntl = drm_order(ring->ring_size / 8);
rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
#endif
WREG32(cp_rb_cntl[i], rb_cntl);

/* set the wb address wether it's enabled or not */
addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
}

mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
/* set the rb base addr, this causes an internal reset of ALL rings */
for (i = 0; i < 3; ++i) {
ring = &rdev->ring[ridx[i]];
WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
}

WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
for (i = 0; i < 3; ++i) {
/* Initialize the ring buffer's read and write pointers */
ring = &rdev->ring[ridx[i]];
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

ring->rptr = RREG32(CP_RB0_RPTR);
ring->rptr = ring->wptr = 0;
WREG32(ring->rptr_reg, ring->rptr);
WREG32(ring->wptr_reg, ring->wptr);

/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB1_CNTL, tmp);

/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
ring->wptr = 0;
WREG32(CP_RB1_WPTR, ring->wptr);

/* set the wb address wether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

mdelay(1);
WREG32(CP_RB1_CNTL, tmp);

WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

ring->rptr = RREG32(CP_RB1_RPTR);

/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB2_CNTL, tmp);

/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
ring->wptr = 0;
WREG32(CP_RB2_WPTR, ring->wptr);

/* set the wb address wether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

mdelay(1);
WREG32(CP_RB2_CNTL, tmp);

WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

ring->rptr = RREG32(CP_RB2_RPTR);
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
}

/* start the rings */
cayman_cp_start(rdev);
@@ -1291,17 +1281,17 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_start(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = radeon_ib_ring_tests(rdev);
if (r)
return r;

r = radeon_vm_manager_start(rdev);
if (r)
r = radeon_vm_manager_init(rdev);
if (r) {
dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
return r;
}

r = r600_audio_init(rdev);
if (r)
@@ -1334,10 +1324,6 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
radeon_ib_pool_suspend(rdev);
radeon_vm_manager_suspend(rdev);
r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
@@ -1413,17 +1399,7 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}
r = radeon_vm_manager_init(rdev);
if (r) {
dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
}

r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
@@ -1432,7 +1408,7 @@ int cayman_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
@@ -1463,7 +1439,7 @@ void cayman_fini(struct radeon_device *rdev)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);

@@ -3722,12 +3722,6 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}

void r100_ib_fini(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
radeon_ib_pool_fini(rdev);
}

void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shutdown CP we shouldn't need to do that but better be safe than
@@ -3887,13 +3881,11 @@ static int r100_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

return 0;
}
@@ -3930,7 +3922,6 @@ int r100_resume(struct radeon_device *rdev)

int r100_suspend(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -3943,7 +3934,7 @@ void r100_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
@@ -4050,20 +4041,14 @@ int r100_init(struct radeon_device *rdev)
}
r100_set_safe_registers(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = r100_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);

@@ -1391,13 +1391,11 @@ static int r300_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

return 0;
}
@@ -1436,7 +1434,6 @@ int r300_resume(struct radeon_device *rdev)

int r300_suspend(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1451,7 +1448,7 @@ void r300_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
@@ -1538,20 +1535,14 @@ int r300_init(struct radeon_device *rdev)
}
r300_set_reg_safe(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = r300_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);

@@ -275,13 +275,11 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_cp_errata_init(rdev);

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

return 0;
}
@@ -324,7 +322,6 @@ int r420_resume(struct radeon_device *rdev)

int r420_suspend(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -340,7 +337,7 @@ void r420_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
@@ -438,20 +435,14 @@ int r420_init(struct radeon_device *rdev)
}
r420_set_reg_safe(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = r420_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);

@@ -203,13 +203,11 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

return 0;
}
@@ -311,20 +309,14 @@ int r520_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = r520_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);

@@ -2155,18 +2155,27 @@ int r600_cp_resume(struct radeon_device *rdev)
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;
int r;

/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;

r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
if (r) {
DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
ring->rptr_save_reg = 0;
}
}

void r600_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_cp_stop(rdev);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
}


@@ -2307,20 +2316,6 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}

void r600_blit_suspend(struct radeon_device *rdev)
{
int r;

/* unpin shaders bo */
if (rdev->r600_blit.shader_obj) {
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (!r) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
}
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2403,13 +2398,11 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = r600_audio_init(rdev);
if (r) {
@@ -2459,9 +2452,6 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
radeon_ib_pool_suspend(rdev);
r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
@@ -2543,20 +2533,14 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2572,7 +2556,7 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2593,7 +2577,14 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];

/* FIXME: implement */
if (ring->rptr_save_reg) {
uint32_t next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, next_rptr);
}

radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN

@@ -524,10 +524,6 @@ int r600_blit_init(struct radeon_device *rdev)

rdev->r600_blit.max_dim = 8192;

/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;

rdev->r600_blit.state_offset = 0;

if (rdev->family >= CHIP_RV770)
@@ -552,11 +548,26 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);

r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
/* pin copy shader into vram if not already initialized */
if (rdev->r600_blit.shader_obj == NULL) {
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
}

r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
}

DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
@@ -587,17 +598,6 @@ int r600_blit_init(struct radeon_device *rdev)
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}

@@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
@@ -622,6 +622,7 @@ struct radeon_ring {
unsigned rptr;
unsigned rptr_offs;
unsigned rptr_reg;
unsigned rptr_save_reg;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
@@ -735,8 +736,6 @@ struct r600_blit {
u32 state_len;
};

void r600_blit_suspend(struct radeon_device *rdev);

/*
* SI RLC stuff
*/
@@ -755,8 +754,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -771,6 +768,10 @@ int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data);
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -1446,6 +1447,7 @@ struct radeon_device {
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
enum radeon_family family;
@@ -1762,8 +1764,6 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_manager_start(struct radeon_device *rdev);
int radeon_vm_manager_suspend(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);

@@ -103,7 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);

@@ -358,7 +358,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
return 0;
return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
@@ -499,7 +499,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_parser parser;
int r;

down_read(&rdev->exclusive_lock);
if (!rdev->accel_working) {
up_read(&rdev->exclusive_lock);
return -EBUSY;
}
/* initialize parser */
@@ -512,6 +514,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
@@ -520,6 +523,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
@@ -533,6 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}

@@ -734,6 +734,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
r = radeon_gem_init(rdev);
@@ -821,6 +822,10 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);

if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
@@ -945,6 +950,7 @@ int radeon_resume_kms(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
int r;

if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -959,6 +965,11 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);

r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);

radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);

@@ -985,28 +996,67 @@ int radeon_resume_kms(struct drm_device *dev)

int radeon_gpu_reset(struct radeon_device *rdev)
{
int r;
unsigned ring_sizes[RADEON_NUM_RINGS];
uint32_t *ring_data[RADEON_NUM_RINGS];

bool saved = false;

int i, r;
int resched;

down_write(&rdev->exclusive_lock);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);

r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeed\n");
radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
&ring_data[i]);
if (ring_sizes[i]) {
saved = true;
dev_info(rdev->dev, "Saved %d dwords of commands "
"on ring %d.\n", ring_sizes[i], i);
}
}

retry:
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
radeon_resume(rdev);
}

radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);

if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
}

r = radeon_ib_ring_tests(rdev);
if (r) {
dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
if (saved) {
radeon_suspend(rdev);
goto retry;
}
}
} else {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}

ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}

up_write(&rdev->exclusive_lock);
return r;
}


@@ -42,21 +42,23 @@

static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
if (rdev->wb.enabled) {
*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
*drv->cpu_addr = cpu_to_le32(seq);
} else {
WREG32(rdev->fence_drv[ring].scratch_reg, seq);
WREG32(drv->scratch_reg, seq);
}
}

static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
u32 seq = 0;

if (rdev->wb.enabled) {
seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = RREG32(rdev->fence_drv[ring].scratch_reg);
seq = RREG32(drv->scratch_reg);
}
return seq;
}
@@ -440,14 +442,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0;
}

/* caller must hold ring lock */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq;

/* We are not protected by ring lock when reading current seq but
* it's ok as worst case is we return to early while we could have
* wait.
*/
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
@@ -457,15 +456,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
/* caller must hold ring lock */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
/* We are not protected by ring lock when reading current seq
* but it's ok as wait empty is call from place where no more
* activity can be scheduled so there won't be concurrent access
* to seq value.
*/
return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
ring, false, false);
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

while(1) {
int r;
r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
if (r == -EDEADLK) {
mutex_unlock(&rdev->ring_lock);
r = radeon_gpu_reset(rdev);
mutex_lock(&rdev->ring_lock);
if (!r)
continue;
}
if (r) {
dev_err(rdev->dev, "error waiting for ring to become"
" idle (%d)\n", r);
}
return;
}
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -567,7 +578,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);

@@ -282,27 +282,58 @@ void radeon_gart_fini(struct radeon_device *rdev)
*
* TODO bind a default page at vm initialization for default address
*/

int radeon_vm_manager_init(struct radeon_device *rdev)
{
struct radeon_vm *vm;
struct radeon_bo_va *bo_va;
int r;

rdev->vm_manager.enabled = false;
if (!rdev->vm_manager.enabled) {
/* mark first vm as always in use, it's the system one */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
rdev->vm_manager.max_pfn * 8,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
(rdev->vm_manager.max_pfn * 8) >> 10);
return r;
}

/* mark first vm as always in use, it's the system one */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
rdev->vm_manager.max_pfn * 8,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
(rdev->vm_manager.max_pfn * 8) >> 10);
return r;
}

r = rdev->vm_manager.funcs->init(rdev);
if (r == 0)
r = rdev->vm_manager.funcs->init(rdev);
if (r)
return r;

rdev->vm_manager.enabled = true;

return r;
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
if (r)
return r;
}

/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
if (vm->id == -1)
continue;

list_for_each_entry(bo_va, &vm->va, vm_list) {
struct ttm_mem_reg *mem = NULL;
if (bo_va->valid)
mem = &bo_va->bo->tbo.mem;

bo_va->valid = false;
r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
if (r) {
DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
}
}

r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
if (r) {
DRM_ERROR("Failed to bind vm %d!\n", vm->id);
}
}
return 0;
}

/* global mutex must be lock */
@@ -316,10 +347,21 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
}

/* wait for vm use to end */
if (vm->fence) {
radeon_fence_wait(vm->fence, false);
radeon_fence_unref(&vm->fence);
while (vm->fence) {
int r;
r = radeon_fence_wait(vm->fence, false);
if (r)
DRM_ERROR("error while waiting for fence: %d\n", r);
if (r == -EDEADLK) {
mutex_unlock(&rdev->vm_manager.lock);
r = radeon_gpu_reset(rdev);
mutex_lock(&rdev->vm_manager.lock);
if (!r)
continue;
}
break;
}
radeon_fence_unref(&vm->fence);

/* hw unbind */
rdev->vm_manager.funcs->unbind(rdev, vm);
@@ -335,27 +377,12 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
}

void radeon_vm_manager_fini(struct radeon_device *rdev)
{
if (rdev->vm_manager.sa_manager.bo == NULL)
return;
radeon_vm_manager_suspend(rdev);
rdev->vm_manager.funcs->fini(rdev);
radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
rdev->vm_manager.enabled = false;
}

int radeon_vm_manager_start(struct radeon_device *rdev)
{
if (rdev->vm_manager.sa_manager.bo == NULL) {
return -EINVAL;
}
return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
}

int radeon_vm_manager_suspend(struct radeon_device *rdev)
{
struct radeon_vm *vm, *tmp;

if (!rdev->vm_manager.enabled)
return;

mutex_lock(&rdev->vm_manager.lock);
/* unbind all active vm */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
@@ -363,7 +390,10 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev)
}
rdev->vm_manager.funcs->fini(rdev);
mutex_unlock(&rdev->vm_manager.lock);
return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);

radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
rdev->vm_manager.enabled = false;
}

/* global mutex must be locked */

@@ -215,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;

down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -228,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
up_read(&rdev->exclusive_lock);
return 0;
}

@@ -240,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
/* transition the BO to a domain -
* just validate the BO into a certain domain */
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -247,10 +252,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,

/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
down_read(&rdev->exclusive_lock);

/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
@@ -258,6 +265,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

drm_gem_object_unreference_unlocked(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}

@@ -129,6 +129,12 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (r) {
return r;
}

r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
if (r) {
return r;
}

rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
@@ -139,21 +145,12 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
}

int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
@@ -272,13 +269,8 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned count_dw_pad;
unsigned i;

/* We pad to match fetch size */
count_dw_pad = (ring->align_mask + 1) -
(ring->wptr & ring->align_mask);
for (i = 0; i < count_dw_pad; i++) {
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
@@ -370,6 +362,88 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return false;
}

/**
* radeon_ring_backup - Back up the content of a ring
*
* @rdev: radeon_device pointer
* @ring: the ring we want to back up
*
* Saves all unprocessed commits from a ring, returns the number of dwords saved.
*/
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data)
{
unsigned size, ptr, i;
int ridx = radeon_ring_index(rdev, ring);

/* just in case lock the ring */
mutex_lock(&rdev->ring_lock);
*data = NULL;

if (ring->ring_obj == NULL || !ring->rptr_save_reg) {
mutex_unlock(&rdev->ring_lock);
return 0;
}

/* it doesn't make sense to save anything if all fences are signaled */
if (!radeon_fence_count_emitted(rdev, ridx)) {
mutex_unlock(&rdev->ring_lock);
return 0;
}

/* calculate the number of dw on the ring */
ptr = RREG32(ring->rptr_save_reg);
size = ring->wptr + (ring->ring_size / 4);
size -= ptr;
size &= ring->ptr_mask;
if (size == 0) {
mutex_unlock(&rdev->ring_lock);
return 0;
}

/* and then save the content of the ring */
*data = kmalloc(size * 4, GFP_KERNEL);
for (i = 0; i < size; ++i) {
(*data)[i] = ring->ring[ptr++];
ptr &= ring->ptr_mask;
}

mutex_unlock(&rdev->ring_lock);
return size;
}

/**
* radeon_ring_restore - append saved commands to the ring again
*
* @rdev: radeon_device pointer
* @ring: ring to append commands to
* @size: number of dwords we want to write
* @data: saved commands
*
* Allocates space on the ring and restore the previously saved commands.
*/
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data)
{
int i, r;

if (!size || !data)
return 0;

/* restore the saved ring content */
r = radeon_ring_lock(rdev, ring, size);
if (r)
return r;

for (i = 0; i < size; ++i) {
radeon_ring_write(ring, data[i]);
}

radeon_ring_unlock_commit(rdev, ring);
kfree(data);
return 0;
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
@@ -459,6 +533,10 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
count = (ring->ring_size / 4) - ring->ring_free_dw;
seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);

@@ -426,13 +426,11 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

return 0;
}
@@ -470,7 +468,6 @@ int rs400_resume(struct radeon_device *rdev)

int rs400_suspend(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -482,7 +479,7 @@ void rs400_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -550,20 +547,14 @@ int rs400_init(struct radeon_device *rdev)
return r;
r300_set_reg_safe(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = rs400_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;

@@ -907,13 +907,11 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = r600_audio_init(rdev);
if (r) {
@@ -955,7 +953,6 @@ int rs600_resume(struct radeon_device *rdev)

int rs600_suspend(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -969,7 +966,7 @@ void rs600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1037,20 +1034,14 @@ int rs600_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = rs600_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;

@@ -637,13 +637,11 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = r600_audio_init(rdev);
if (r) {
@@ -685,7 +683,6 @@ int rs690_resume(struct radeon_device *rdev)

int rs690_suspend(struct radeon_device *rdev)
{
radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -699,7 +696,7 @@ void rs690_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -768,20 +765,14 @@ int rs690_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = rs690_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;

@@ -408,13 +408,11 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

return 0;
}

@@ -469,7 +467,7 @@ void rv515_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);

@@ -543,20 +541,14 @@ int rv515_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = rv515_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);

@@ -358,8 +358,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)

void r700_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r700_cp_stop(rdev);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
}

/*

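r700_cp_fini() now also releases the ring's rptr_save_reg scratch register. The matching allocation is assumed to happen on the setup side when the GFX ring is brought up, roughly along these lines (radeon_scratch_get() is the existing counterpart of radeon_scratch_free(); the surrounding error handling here is illustrative only):

	/* allocate a scratch register the CP can write its saved rptr into */
	r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
	if (r) {
		dev_err(rdev->dev, "failed to get scratch reg (%d).\n", r);
		return r;
	}
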
@@ -951,13 +953,11 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_ring_tests(rdev);
if (r)
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = r600_audio_init(rdev);
if (r) {

@@ -994,9 +994,6 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
radeon_ib_pool_suspend(rdev);
r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);

@@ -1076,20 +1073,14 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}

r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;

@@ -1104,7 +1095,7 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);

@@ -1765,6 +1765,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 header;

if (ring->rptr_save_reg) {
uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
}

if (ib->is_const_ib)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else

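The new block records where the ring's read pointer will stand once this submission has been consumed, by writing that value into the per-ring rptr_save_reg scratch register via a SET_CONFIG_REG packet. A commented sketch of the same three dwords follows; the exact breakdown of the "+ 3 + 4 + 8" offset is an assumption here, not something the patch states:

	if (ring->rptr_save_reg) {
		/* 3 dwords for this SET_CONFIG_REG write itself, plus the
		 * dwords si_ring_ib_execute() emits after it before the IB
		 * is fetched (presumably the 4-dword INDIRECT_BUFFER packet
		 * and 8 further dwords on SI), so the scratch register ends
		 * up holding the rptr expected after the dispatch. */
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;

		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (ring->rptr_save_reg -
					 PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, next_rptr);
	}
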
@@ -1917,10 +1925,20 @@ static int si_cp_start(struct radeon_device *rdev)

static void si_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring;
si_cp_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);

ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);

ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);

ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
}

static int si_cp_resume(struct radeon_device *rdev)

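si_cp_fini() now tears down each of the three SI command-processor rings and frees its rptr_save_reg scratch register. The patch spells the three blocks out; an equivalent loop-based form would look roughly like the following (an illustrative refactoring with a hypothetical name, not what the patch adds):

	static void si_cp_fini_sketch(struct radeon_device *rdev)
	{
		static const int ring_idx[] = {
			RADEON_RING_TYPE_GFX_INDEX,
			CAYMAN_RING_TYPE_CP1_INDEX,
			CAYMAN_RING_TYPE_CP2_INDEX,
		};
		int i;

		si_cp_enable(rdev, false);
		for (i = 0; i < ARRAY_SIZE(ring_idx); i++) {
			struct radeon_ring *ring = &rdev->ring[ring_idx[i]];

			radeon_ring_fini(rdev, ring);
			radeon_scratch_free(rdev, ring->rptr_save_reg);
		}
	}
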
@@ -3750,35 +3768,18 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_start(rdev);
if (r)
return r;

r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d) on CP ring 0\n", r);
rdev->accel_working = false;
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}

r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
r = radeon_vm_manager_init(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d) on CP ring 1\n", r);
rdev->accel_working = false;
dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
return r;
}

r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d) on CP ring 2\n", r);
rdev->accel_working = false;
return r;
}

r = radeon_vm_manager_start(rdev);
if (r)
return r;

return 0;
}

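With this hunk the per-ring radeon_ib_test() calls and the radeon_vm_manager_start() call drop out of si_startup(), leaving only the IB pool and VM manager initialization. The resulting tail of the function presumably reads (a reconstruction with indentation restored):

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
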
@@ -3807,12 +3808,6 @@ int si_resume(struct radeon_device *rdev)

int si_suspend(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
radeon_ib_pool_suspend(rdev);
radeon_vm_manager_suspend(rdev);
#if 0
r600_blit_suspend(rdev);
#endif
si_cp_enable(rdev, false);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;

@@ -3901,17 +3896,7 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;

r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
}
r = radeon_vm_manager_init(rdev);
if (r) {
dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
}

r = si_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");

@@ -3919,7 +3904,7 @@ int si_init(struct radeon_device *rdev)
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
si_pcie_gart_fini(rdev);

@@ -3948,7 +3933,7 @@ void si_fini(struct radeon_device *rdev)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
r100_ib_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);