Merge branch 'drm-next-3.13' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
More fixes for radeon. This adds new queries for tiling on CIK, and fixes
a crash in handling acpi atif backlight events on CIK.

Some fixes for radeon for 3.13. Mostly CI stability fixes. I think I've
tracked down the stability problems with dpm on Trinity/Richland, so I'm
going to enable that by default now.

* 'drm-next-3.13' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: hook up backlight functions for CI and KV family.
  drm/radeon/cik: Add macrotile mode array query
  drm/radeon/cik: Return backend map information to userspace
  drm/radeon: enable DPM by default in TN asics
  drm/radeon: adjust TN dpm parameters for stability (v2)
  drm/radeon: use a single doorbell for cik kms compute
  drm/radeon/vm: don't attempt to update ptes if ib allocation fails
  drm/radeon: disable CIK CP semaphores for now
  drm/radeon: allow semaphore emission to fail
  drm/radeon: add semaphore trace point
  radeon: workaround pinning failure on low ram gpu
  radeon/i2c: do not count reg index in number of i2c byte we are writing.
  drm/radeon: cypress_dpm: Fix unused variable warning when CONFIG_ACPI=n
  drm: radeon: ni_dpm: Fix unused variable warning when CONFIG_ACPI=n
commit ded5107e2a
@@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
 			return -EINVAL;
 		}
 		args.ucRegIndex = buf[0];
-		if (num > 1)
-			memcpy(&out, &buf[1], num - 1);
+		if (num > 1) {
+			num--;
+			memcpy(&out, &buf[1], num);
+		}
 		args.lpI2CDataOut = cpu_to_le16(out);
 	} else {
 		if (num > ATOM_MAX_HW_I2C_READ) {
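The bug being fixed here: for an i2c write, buf[0] is the register index and only num - 1 bytes are payload, but the old code still reported num bytes to atom. A toy, user-space illustration of the corrected accounting (hypothetical values, not kernel code):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		/* buf[0] is the register index; the rest is payload. */
		uint8_t buf[] = { 0x2a, 0x11, 0x22, 0x33 };
		unsigned num = sizeof(buf);	/* 4 bytes handed to the driver */
		uint8_t out[4] = {0};

		if (num > 1) {
			num--;			/* don't count the reg index */
			memcpy(out, &buf[1], num);
		}
		printf("reg 0x%02x, %u data bytes\n", buf[0], num);	/* 3, not 4 */
		return 0;
	}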
@@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
  * cik_mm_rdoorbell - read a doorbell dword
  *
  * @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
  *
  * Returns the value in the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
  */
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
 {
-	if (offset < rdev->doorbell.size) {
-		return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+	if (index < rdev->doorbell.num_doorbells) {
+		return readl(rdev->doorbell.ptr + index);
 	} else {
-		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 		return 0;
 	}
 }
@@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
  * cik_mm_wdoorbell - write a doorbell dword
  *
  * @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
  * @v: value to write
  *
  * Writes @v to the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
  */
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
 {
-	if (offset < rdev->doorbell.size) {
-		writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+	if (index < rdev->doorbell.num_doorbells) {
+		writel(v, rdev->doorbell.ptr + index);
 	} else {
-		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 	}
 }
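Note why the byte-offset casts disappear: the series retypes rdev->doorbell.ptr as u32 __iomem *, so plain pointer arithmetic already scales the index by sizeof(u32). A standalone toy sketch (not kernel code) of that C pointer-arithmetic rule:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t bar[4] = {0};
		uint32_t *ptr = bar;

		bar[3] = 0xcafe;
		/* ptr + 3 advances 3 * sizeof(uint32_t) = 12 bytes, the same
		 * location the old (char *)ptr + byte_offset form reached
		 * with byte_offset = 12. */
		printf("%x %x\n", *(ptr + 3),
		       *(uint32_t *)((char *)ptr + 3 * sizeof(uint32_t)));
		return 0;
	}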
@@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else if (num_pipe_configs == 4) {
@@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else if (num_pipe_configs == 2) {
@@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else
@@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, 0);
 }
 
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
 {
+/* TODO: figure out why semaphore cause lockups */
+#if 0
 	uint64_t addr = semaphore->gpu_addr;
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
 	radeon_ring_write(ring, addr & 0xffffffff);
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+	return true;
+#else
+	return false;
+#endif
 }
 
 /**
@@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
@@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
 			       struct radeon_ring *ring)
 {
 	rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
-	WDOORBELL32(ring->doorbell_offset, ring->wptr);
+	WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
 /**
@@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 			return r;
 		}
 
-		/* doorbell offset */
-		rdev->ring[idx].doorbell_offset =
-			(rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
-
 		/* init the mqd struct */
 		memset(buf, 0, sizeof(struct bonaire_mqd));
 
@@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 			RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
 		mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
 		mqd->queue_state.cp_hqd_pq_doorbell_control |=
-			DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+			DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
 		mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
 		mqd->queue_state.cp_hqd_pq_doorbell_control &=
 			~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
-	r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
-	r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
 	if (r)
 		return r;
@@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
  * Add a DMA semaphore packet to the ring wait on or signal
  * other rings (CIK).
  */
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait)
@@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
 	radeon_ring_write(ring, addr & 0xfffffff8);
 	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+	return true;
 }
 
 /**
@@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
 static int cypress_pcie_performance_request(struct radeon_device *rdev,
 					    u8 perf_req, bool advertise)
 {
+#if defined(CONFIG_ACPI)
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
 	u32 tmp;
 
 	udelay(10);
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
@@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
 static int ni_pcie_performance_request(struct radeon_device *rdev,
 				       u8 perf_req, bool advertise)
 {
+#if defined(CONFIG_ACPI)
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 
-#if defined(CONFIG_ACPI)
 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
 	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
 		if (eg_pi->pcie_performance_request_registered == false)
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }
 
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
 {
 	/* Unused on older asics, since we don't have semaphores or multiple rings */
 	BUG();
+	return false;
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
@@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	}
 }
 
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *cp,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
@@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
 	radeon_ring_write(ring, addr & 0xffffffff);
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+	return true;
 }
 
 /**
@@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
  * Add a DMA semaphore packet to the ring wait on or signal
  * other rings (r6xx-SI).
  */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
 	radeon_ring_write(ring, addr & 0xfffffffc);
 	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+	return true;
 }
 
 /**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_locked(struct radeon_fence *fence);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
 	struct radeon_sa_bo	*sa_bo;
 	signed			waiters;
 	uint64_t		gpu_addr;
+	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
 };
 
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
 				  struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 				struct radeon_semaphore *semaphore);
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+			      struct radeon_fence *fence);
 int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				struct radeon_semaphore *semaphore,
-				int signaler, int waiter);
+				int waiting_ring);
 void radeon_semaphore_free(struct radeon_device *rdev,
 			   struct radeon_semaphore **semaphore,
 			   struct radeon_fence *fence);
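The API change above replaces the per-pair (signaler, waiter) call with a two-step pattern that every copy path in this series adopts: record the fences to wait on, then sync the waiting ring once. A minimal sketch of a caller (not taken from the patch; error handling condensed, identifiers follow the kernel code shown above):

	/* Sketch of the post-patch sync idiom used by the copy functions. */
	static int example_sync_before_copy(struct radeon_device *rdev,
					    struct radeon_ring *ring,
					    struct radeon_fence **fence)
	{
		struct radeon_semaphore *sem = NULL;
		int r;

		r = radeon_semaphore_create(rdev, &sem);
		if (r)
			return r;

		/* Step 1: record the fence to wait on (a NULL fence is a no-op). */
		radeon_semaphore_sync_to(sem, *fence);

		/* Step 2: sync the waiting ring once against everything recorded.
		 * Internally this emits signal/wait packets, or falls back to
		 * radeon_fence_wait_locked() when emission fails. */
		r = radeon_semaphore_sync_rings(rdev, sem, ring->idx);
		if (r)
			radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}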
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
 /*
  * GPU doorbell structures, functions & helpers
  */
+#define RADEON_MAX_DOORBELLS 1024	/* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
 struct radeon_doorbell {
-	u32			num_pages;
-	bool			free[1024];
 	/* doorbell mmio */
-	resource_size_t		base;
-	resource_size_t		size;
-	void __iomem		*ptr;
+	resource_size_t		base;
+	resource_size_t		size;
+	u32 __iomem		*ptr;
+	u32			num_doorbells;	/* Number of doorbells actually reserved for radeon. */
+	unsigned long		used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
 };
 
 int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@@ -765,7 +771,6 @@ struct radeon_ib {
 	struct radeon_fence	*fence;
 	struct radeon_vm	*vm;
 	bool			is_const_ib;
-	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
 	struct radeon_semaphore	*semaphore;
 };
 
@@ -799,8 +804,7 @@ struct radeon_ring {
 	u32 pipe;
 	u32 queue;
 	struct radeon_bo	*mqd_obj;
-	u32 doorbell_page_num;
-	u32 doorbell_offset;
+	u32 doorbell_index;
 	unsigned		wptr_offs;
 };
 
@@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib *ib, struct radeon_vm *vm,
 		  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		       struct radeon_ib *const_ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
 	/* command emmit functions */
 	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
 	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
-	void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+	bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
 			       struct radeon_semaphore *semaphore, bool emit_wait);
 	void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
@@ -1979,6 +1982,7 @@ struct cik_asic {
 
 	unsigned tile_config;
 	uint32_t tile_mode_array[32];
+	uint32_t macrotile_mode_array[16];
 };
 
 union radeon_asic_config {
@@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
 
 /*
  * Cast helper
@@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
-#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
-#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
 
 /*
  * Indirect registers accessor
@@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = {
 		.bandwidth_update = &dce8_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 		.hdmi_enable = &evergreen_hdmi_enable,
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
@@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = {
 		.bandwidth_update = &dce8_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 		.hdmi_enable = &evergreen_hdmi_enable,
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *cp,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
 int r600_dma_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *cp,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait);
 void r600_dma_fence_ring_emit(struct radeon_device *rdev,
 			      struct radeon_fence *fence);
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
 */
 void cayman_fence_ring_emit(struct radeon_device *rdev,
 			    struct radeon_fence *fence);
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
-			       struct radeon_ring *ring,
-			       struct radeon_semaphore *semaphore,
-			       bool emit_wait);
 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int cayman_init(struct radeon_device *rdev);
 void cayman_fini(struct radeon_device *rdev);
@@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 			      struct radeon_fence *fence);
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait);
@@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
 			     struct radeon_fence *fence);
 void cik_fence_compute_ring_emit(struct radeon_device *rdev,
 				 struct radeon_fence *fence);
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 			     struct radeon_ring *cp,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait);
@@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
 
 int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait);
@@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
 			 struct radeon_fence *fence);
 
 /* uvd v3.1 */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait);
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
 		if (!p->relocs[i].robj)
 			continue;
 
-		radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+		radeon_semaphore_sync_to(p->ib.semaphore,
+					 p->relocs[i].robj->tbo.sync_obj);
 	}
 }
 
@@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		goto out;
 	}
 	radeon_cs_sync_rings(parser);
-	radeon_ib_sync_to(&parser->ib, vm->fence);
-	radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
-		rdev, vm, parser->ring));
+	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+	radeon_semaphore_sync_to(parser->ib.semaphore,
+				 radeon_vm_grab_id(rdev, vm, parser->ring));
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
@@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 */
 int radeon_doorbell_init(struct radeon_device *rdev)
 {
-	int i;
-
 	/* doorbell bar mapping */
 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
 
-	/* limit to 4 MB for now */
-	if (rdev->doorbell.size > (4 * 1024 * 1024))
-		rdev->doorbell.size = 4 * 1024 * 1024;
+	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+	if (rdev->doorbell.num_doorbells == 0)
+		return -EINVAL;
 
-	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
+	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
 	if (rdev->doorbell.ptr == NULL) {
 		return -ENOMEM;
 	}
 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
 
-	rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
+	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
 
-	for (i = 0; i < rdev->doorbell.num_pages; i++) {
-		rdev->doorbell.free[i] = true;
-	}
 	return 0;
 }
 
@@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
 }
 
 /**
- * radeon_doorbell_get - Allocate a doorbell page
+ * radeon_doorbell_get - Allocate a doorbell entry
  *
  * @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
  *
- * Allocate a doorbell page for use by the driver (all asics).
+ * Allocate a doorbell for use by the driver (all asics).
  * Returns 0 on success or -EINVAL on failure.
  */
 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
 {
-	int i;
-
-	for (i = 0; i < rdev->doorbell.num_pages; i++) {
-		if (rdev->doorbell.free[i]) {
-			rdev->doorbell.free[i] = false;
-			*doorbell = i;
-			return 0;
-		}
+	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+	if (offset < rdev->doorbell.num_doorbells) {
+		__set_bit(offset, rdev->doorbell.used);
+		*doorbell = offset;
+		return 0;
+	} else {
+		return -EINVAL;
 	}
-	return -EINVAL;
 }
 
 /**
- * radeon_doorbell_free - Free a doorbell page
+ * radeon_doorbell_free - Free a doorbell entry
  *
  * @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
  *
- * Free a doorbell page allocated for use by the driver (all asics)
+ * Free a doorbell allocated for use by the driver (all asics)
 */
 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
 {
-	if (doorbell < rdev->doorbell.num_pages)
-		rdev->doorbell.free[doorbell] = true;
+	if (doorbell < rdev->doorbell.num_doorbells)
+		__clear_bit(doorbell, rdev->doorbell.used);
 }
 
 /*
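Putting the pieces together, a sketch (not from the patch) of the per-ring doorbell lifecycle after this change: a ring reserves one dword slot by index from the bitmap allocator, kicks the GPU by writing its write pointer through that slot, and releases the slot on teardown. Identifiers follow the kernel code in this diff:

	static int example_doorbell_lifecycle(struct radeon_device *rdev,
					      struct radeon_ring *ring)
	{
		int r;

		/* Reserve one doorbell slot from the bitmap allocator. */
		r = radeon_doorbell_get(rdev, &ring->doorbell_index);
		if (r)
			return r;

		/* Ring the doorbell: WDOORBELL32() bounds-checks the index
		 * and does a writel() into the doorbell BAR. */
		WDOORBELL32(ring->doorbell_index, ring->wptr);

		/* Return the slot when the ring is torn down. */
		radeon_doorbell_free(rdev, ring->doorbell_index);
		return 0;
	}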
@@ -76,9 +76,10 @@
  * 2.32.0 - new info request for rings working
  * 2.33.0 - Add SI tiling mode array query
  * 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	34
+#define KMS_DRIVER_MINOR	35
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -471,6 +471,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/**
+ * radeon_fence_wait_locked - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ *
+ * Wait for the requested fence to signal (all asics).
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_locked(struct radeon_fence *fence)
+{
+	uint64_t seq[RADEON_NUM_RINGS] = {};
+	int r;
+
+	if (fence == NULL) {
+		WARN(1, "Querying an invalid fence : %p !\n", fence);
+		return -EINVAL;
+	}
+
+	seq[fence->ring] = fence->seq;
+	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+		return 0;
+
+	r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
+	if (r)
+		return r;
+
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+	return 0;
+}
+
 /**
  * radeon_fence_wait_next_locked - wait for the next fence to signal
  *
@@ -651,7 +651,7 @@ retry:
 	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
 				0, pd_entries, 0, 0);
 
-	radeon_ib_sync_to(&ib, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
@@ -1209,6 +1209,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		return -ENOMEM;
 
 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
 	ib.length_dw = 0;
 
 	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1220,7 +1222,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
 			      addr, radeon_vm_page_flags(bo_va->flags));
 
-	radeon_ib_sync_to(&ib, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
@@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		break;
 	case RADEON_INFO_BACKEND_MAP:
 		if (rdev->family >= CHIP_BONAIRE)
-			return -EINVAL;
+			*value = rdev->config.cik.backend_map;
 		else if (rdev->family >= CHIP_TAHITI)
 			*value = rdev->config.si.backend_map;
 		else if (rdev->family >= CHIP_CAYMAN)
@@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			return -EINVAL;
 		}
 		break;
+	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+		if (rdev->family >= CHIP_BONAIRE) {
+			value = rdev->config.cik.macrotile_mode_array;
+			value_size = sizeof(uint32_t)*16;
+		} else {
+			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+			return -EINVAL;
+		}
+		break;
 	case RADEON_INFO_SI_CP_DMA_COMPUTE:
 		*value = 1;
 		break;
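A userspace consumer (e.g. a mesa driver) would fetch the new array through the usual DRM_RADEON_INFO path. A rough libdrm-style sketch under the assumption that fd is an open radeon render node and the standard drmCommandWriteRead() helper is available; the helper name query_macrotile_modes is hypothetical:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <radeon_drm.h>	/* from libdrm */

	static int query_macrotile_modes(int fd, uint32_t modes[16])
	{
		struct drm_radeon_info info;

		memset(&info, 0, sizeof(info));
		info.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY;
		/* value carries a pointer; the kernel copies 16 dwords here */
		info.value = (uint64_t)(uintptr_t)modes;

		return drmCommandWriteRead(fd, DRM_RADEON_INFO,
					   &info, sizeof(info));
	}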
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 	/* Pin framebuffer & get tilling informations */
 	obj = radeon_fb->obj;
 	rbo = gem_to_radeon_bo(obj);
+retry:
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 				     &base);
 	if (unlikely(r != 0)) {
 		radeon_bo_unreserve(rbo);
+
+		/* On old GPU like RN50 with little vram pining can fails because
+		 * current fb is taking all space needed. So instead of unpining
+		 * the old buffer after pining the new one, first unpin old one
+		 * and then retry pining new one.
+		 *
+		 * As only master can set mode only master can pin and it is
+		 * unlikely the master client will race with itself especialy
+		 * on those old gpu with single crtc.
+		 *
+		 * We don't shutdown the display controller because new buffer
+		 * will end up in same spot.
+		 */
+		if (!atomic && fb && fb != crtc->fb) {
+			struct radeon_bo *old_rbo;
+			unsigned long nsize, osize;
+
+			old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+			osize = radeon_bo_size(old_rbo);
+			nsize = radeon_bo_size(rbo);
+			if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+				radeon_bo_unpin(old_rbo);
+				radeon_bo_unreserve(old_rbo);
+				fb = NULL;
+				goto retry;
+			}
+		}
 		return -EINVAL;
 	}
 	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
@@ -1252,7 +1252,6 @@ int radeon_pm_init(struct radeon_device *rdev)
 	case CHIP_RS780:
 	case CHIP_RS880:
 	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
 	case CHIP_BONAIRE:
 	case CHIP_KABINI:
 	case CHIP_KAVERI:
@@ -1284,6 +1283,7 @@ int radeon_pm_init(struct radeon_device *rdev)
 	case CHIP_BARTS:
 	case CHIP_TURKS:
 	case CHIP_CAICOS:
+	case CHIP_ARUBA:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib *ib, struct radeon_vm *vm,
 		  unsigned size)
 {
-	int i, r;
+	int r;
 
 	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
 	if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
 	}
 	ib->is_const_ib = false;
-	for (i = 0; i < RADEON_NUM_RINGS; ++i)
-		ib->sync_to[i] = NULL;
 
 	return 0;
 }
@@ -108,25 +106,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_fence_unref(&ib->fence);
 }
 
-/**
- * radeon_ib_sync_to - sync to fence before executing the IB
- *
- * @ib: IB object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence before executing the IB
- */
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
-{
-	struct radeon_fence *other;
-
-	if (!fence)
-		return;
-
-	other = ib->sync_to[fence->ring];
-	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
 /**
  * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
  *
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		       struct radeon_ib *const_ib)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
-	bool need_sync = false;
-	int i, r = 0;
+	int r = 0;
 
 	if (!ib->length_dw || !ring->ready) {
 		/* TODO: Nothings in the ib we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		struct radeon_fence *fence = ib->sync_to[i];
-		if (radeon_fence_need_sync(fence, ib->ring)) {
-			need_sync = true;
-			radeon_semaphore_sync_rings(rdev, ib->semaphore,
-						    fence->ring, ib->ring);
-			radeon_fence_note_sync(fence, ib->ring);
-		}
-	}
-	/* immediately free semaphore when we don't need to sync */
-	if (!need_sync) {
-		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+
+	/* sync with other rings */
+	r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
 	}
 
 	/* if we can't remember our last VM flush then flush now! */
 	/* XXX figure out why we have to flush for every IB */
 	if (ib->vm /*&& !ib->vm->last_flush*/) {
@@ -29,12 +29,12 @@
 */
 #include <drm/drmP.h>
 #include "radeon.h"
-
+#include "radeon_trace.h"
 
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	int r;
+	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
@@ -50,55 +50,122 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 	(*semaphore)->waiters = 0;
 	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
 	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		(*semaphore)->sync_to[i] = NULL;
+
 	return 0;
 }
 
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
 				  struct radeon_semaphore *semaphore)
 {
-	--semaphore->waiters;
-	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	trace_radeon_semaphore_signale(ridx, semaphore);
+
+	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+		--semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
 }
 
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
 				struct radeon_semaphore *semaphore)
 {
-	++semaphore->waiters;
-	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	trace_radeon_semaphore_wait(ridx, semaphore);
+
+	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+		++semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
 }
 
-/* caller must hold ring lock */
+/**
+ * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ *
+ * @semaphore: semaphore object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using this semaphore object
+ */
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+			      struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = semaphore->sync_to[fence->ring];
+	semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+/**
+ * radeon_semaphore_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @semaphore: semaphore object to use for sync
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
 int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				struct radeon_semaphore *semaphore,
-				int signaler, int waiter)
+				int ring)
 {
-	int r;
+	int i, r;
 
-	/* no need to signal and wait on the same ring */
-	if (signaler == waiter) {
-		return 0;
-	}
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_fence *fence = semaphore->sync_to[i];
 
-	/* prevent GPU deadlocks */
-	if (!rdev->ring[signaler].ready) {
-		dev_err(rdev->dev, "Trying to sync to a disabled ring!");
-		return -EINVAL;
-	}
+		/* check if we really need to sync */
+		if (!radeon_fence_need_sync(fence, ring))
+			continue;
 
-	r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
-	if (r) {
-		return r;
-	}
-	radeon_semaphore_emit_signal(rdev, signaler, semaphore);
-	radeon_ring_commit(rdev, &rdev->ring[signaler]);
+		/* prevent GPU deadlocks */
+		if (!rdev->ring[i].ready) {
+			dev_err(rdev->dev, "Syncing to a disabled ring!");
+			return -EINVAL;
+		}
 
-	/* we assume caller has already allocated space on waiters ring */
-	radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+		/* allocate enough space for sync command */
+		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+		if (r) {
+			return r;
+		}
 
-	/* for debugging lockup only, used by sysfs debug files */
-	rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
-	rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+		/* emit the signal semaphore */
+		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+			/* signaling wasn't successful wait manually */
+			radeon_ring_undo(&rdev->ring[i]);
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
+		/* we assume caller has already allocated space on waiters ring */
+		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+			/* waiting wasn't successful wait manually */
+			radeon_ring_undo(&rdev->ring[i]);
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
+		radeon_ring_commit(rdev, &rdev->ring[i]);
+		radeon_fence_note_sync(fence, ring);
+	}
 
 	return 0;
 }
@@ -111,6 +111,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
 	    TP_ARGS(dev, seqno)
 );
 
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+	    TP_ARGS(ring, sem),
+
+	    TP_STRUCT__entry(
+			     __field(int, ring)
+			     __field(signed, waiters)
+			     __field(uint64_t, gpu_addr)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->ring = ring;
+			   __entry->waiters = sem->waiters;
+			   __entry->gpu_addr = sem->gpu_addr;
+			   ),
+
+	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+		      __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+	    TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+	    TP_ARGS(ring, sem)
+);
+
 #endif
 
 /* This part must be outside protection */
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
@@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
 	pi->enable_gfx_clock_gating = true;
-	pi->enable_mg_clock_gating = true;
-	pi->enable_gfx_dynamic_mgpg = true; /* ??? */
-	pi->override_dynamic_mgpg = true;
+	pi->enable_mg_clock_gating = false;
+	pi->enable_gfx_dynamic_mgpg = false;
+	pi->override_dynamic_mgpg = false;
 	pi->enable_auto_thermal_throttling = true;
 	pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
 	pi->uvd_dpm = true; /* ??? */
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * Emit a semaphore command (either wait or signal) to the UVD ring.
  */
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
 	radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+	return true;
 }
 
 /**
@@ -37,7 +37,7 @@
  *
  * Emit a semaphore command (either wait or signal) to the UVD ring.
  */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
 	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+	return true;
 }
@@ -981,6 +981,8 @@ struct drm_radeon_cs {
 #define RADEON_INFO_SI_TILE_MODE_ARRAY	0x16
 /* query if CP DMA is supported on the compute ring */
 #define RADEON_INFO_SI_CP_DMA_COMPUTE	0x17
+/* CIK macrotile mode array */
+#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY	0x18
 
 
 struct drm_radeon_info {