Merge branch 'drm-fixes-3.7' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Alex writes:
"This is the first -fixes pull for 3.7.  I would have preferred
to have gotten it out a bit sooner, but I was on holiday last week.
- Cleanup of the new 2 level page table code to get it in
  better shape and using less memory.
- Fix some display issues related to the PLL rework.
- Fix some compiler warnings and errors with certain config
  options.
- Other misc bug fixes."

* 'drm-fixes-3.7' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: fix spelling typos in debugging output
  drm/radeon: Don't destroy I2C Bus Rec in radeon_ext_tmds_enc_destroy().
  drm/radeon: check if pcie gen 2 is already enabled (v2)
  drm/radeon/cayman: set VM max pfn at MC init
  drm/radeon: separate pt alloc from lru add
  drm/radeon: don't add the IB pool to all VMs v2
  drm/radeon: allocate page tables on demand v4
  drm/radeon: update comments to clarify VM setup (v2)
  drm/radeon: allocate PPLLs from low to high
  drm/radeon: fix compilation with backlight disabled
  drm/radeon: use %zu for formatting size_t
Committed by Dave Airlie on 2012-10-16 10:13:24 +10:00
commit 39df01cd6c
13 changed files with 359 additions and 152 deletions
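
As orientation for the radeon_gart.c changes below: the two-level scheme keeps a small page directory whose 8-byte entries point at page tables that are now allocated on demand. A minimal, self-contained sketch of how a GPU virtual address splits under that scheme follows; the concrete page size and block size (4 KiB pages, 512 PTEs per table) are assumptions standing in for RADEON_GPU_PAGE_SIZE and RADEON_VM_BLOCK_SIZE, not values taken from this diff.

#include <stdio.h>

/* assumed stand-ins for RADEON_GPU_PAGE_SIZE and RADEON_VM_BLOCK_SIZE */
#define GPU_PAGE_SIZE 4096ULL
#define VM_BLOCK_SIZE 9
#define VM_PTE_COUNT  (1ULL << VM_BLOCK_SIZE)

int main(void)
{
    unsigned long long gpu_addr = 0x12345678ULL;             /* arbitrary example address */
    unsigned long long pfn      = gpu_addr / GPU_PAGE_SIZE;   /* page frame number */
    unsigned long long pde_idx  = pfn >> VM_BLOCK_SIZE;       /* which page table (directory entry) */
    unsigned long long pte_idx  = pfn & (VM_PTE_COUNT - 1);   /* entry inside that page table */

    printf("addr 0x%llx -> pfn %llu, pde %llu, pte %llu\n",
           gpu_addr, pfn, pde_idx, pte_idx);
    return 0;
}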

@@ -1690,10 +1690,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
@@ -1715,10 +1715,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {

@@ -3431,9 +3431,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

@@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-7 */
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->gart.table_addr >> 12);
}
@@ -1572,12 +1576,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
if (vm == NULL)
return;
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->last_pfn);
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

@@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
/* 55 nm r6xx asics */

@@ -663,9 +663,14 @@ struct radeon_vm {
struct list_head list;
struct list_head va;
unsigned id;
unsigned last_pfn;
u64 pd_gpu_addr;
struct radeon_sa_bo *sa_bo;
/* contains the page directory */
struct radeon_sa_bo *page_directory;
uint64_t pd_gpu_addr;
/* array of page tables, one for each page directory entry */
struct radeon_sa_bo **page_tables;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,

@@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle,
size = *(u16 *) info->buffer.pointer;
if (size < 12) {
DRM_INFO("ATIF buffer is too small: %lu\n", size);
DRM_INFO("ATIF buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
radeon_set_backlight_level(rdev, enc, req.backlight_level);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *dig = enc->enc_priv;
backlight_force_update(dig->bl_dev,
@@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
backlight_force_update(dig->bl_dev,
BACKLIGHT_UPDATE_HOTKEY);
}
#endif
}
}
/* TODO: check other events */
@@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle,
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
DRM_INFO("ATCS buffer is too small: %lu\n", size);
DRM_INFO("ATCS buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}

@@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
}
out:
radeon_vm_add_to_lru(rdev, vm);
mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
return r;

@@ -1018,6 +1018,10 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
/* Adjust VM size here.
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
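
A quick compile-time check of the arithmetic in the comment above, under the same assumptions as the sketch near the top (4 KiB GPU pages, 8-byte page directory entries, 512 PTEs per table); the names are placeholders rather than the driver's macros.

enum { MAX_PFN = 1 << 20 };   /* pages addressable per VM, as set above */

_Static_assert(4096ULL * MAX_PFN == 4ULL << 30,
               "1M pages of 4 KiB each == 4 GiB of VM address space");
_Static_assert((MAX_PFN >> 9) * 8 == 16 << 10,
               "2048 page directory entries of 8 bytes == a 16 KiB directory");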

@@ -422,6 +422,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
* TODO bind a default page at vm initialization for default address
*/
/**
* radeon_vm_num_pde - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
* Calculate the number of page directory entries (cayman+).
*/
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
/**
* radeon_vm_directory_size - returns the size of the page directory in bytes
*
@@ -431,7 +443,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
*/
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
/**
@@ -451,11 +463,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
if (!rdev->vm_manager.enabled) {
/* allocate enough for 2 full VM pts */
size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
size = radeon_vm_directory_size(rdev);
size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
size,
RADEON_GPU_PAGE_ALIGN(size),
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,7 +488,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
if (vm->sa_bo == NULL)
if (vm->page_directory == NULL)
continue;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -500,16 +512,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
int i;
if (!vm->sa_bo)
if (!vm->page_directory)
return;
list_del_init(&vm->list);
radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
if (vm->page_tables == NULL)
return;
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
kfree(vm->page_tables);
}
/**
@@ -545,6 +566,35 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;
}
/**
* radeon_vm_evict - evict page table to make room for new one
*
* @rdev: radeon_device pointer
* @vm: VM we want to allocate something for
*
* Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
* Returns 0 for success, -ENOMEM for failure.
*
* Global and local mutex must be locked!
*/
int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
if (list_empty(&rdev->vm_manager.lru_vm))
return -ENOMEM;
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
struct radeon_vm, list);
if (vm_evict == vm)
return -ENOMEM;
mutex_lock(&vm_evict->mutex);
radeon_vm_free_pt(rdev, vm_evict);
mutex_unlock(&vm_evict->mutex);
return 0;
}
/**
* radeon_vm_alloc_pt - allocates a page table for a VM
*
@@ -552,57 +602,71 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
* @vm: vm to bind
*
* Allocate a page table for the requested vm (cayman+).
* Also starts to populate the page table.
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
int r;
unsigned pd_size, pts_size;
u64 *pd_addr;
int tables_size;
int r;
if (vm == NULL) {
return -EINVAL;
}
/* allocate enough to cover the current VM size */
tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
if (vm->sa_bo != NULL) {
/* update lru */
list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
if (vm->page_directory != NULL) {
return 0;
}
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
tables_size, RADEON_GPU_PAGE_SIZE, false);
pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
}
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
mutex_lock(&vm_evict->mutex);
radeon_vm_free_pt(rdev, vm_evict);
mutex_unlock(&vm_evict->mutex);
goto retry;
} else if (r) {
return r;
}
pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
memset(pd_addr, 0, tables_size);
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
/* Initially clear the page directory */
pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
memset(pd_addr, 0, pd_size);
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
return -ENOMEM;
}
return 0;
}
/**
* radeon_vm_add_to_lru - add VMs page table to LRU list
*
* @rdev: radeon_device pointer
* @vm: vm to add to LRU
*
* Add the allocated page table to the LRU list (cayman+).
*
* Global mutex must be locked!
*/
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
&rdev->ring_tmp_bo.bo->tbo.mem);
}
/**
@@ -793,20 +857,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
if (last_pfn > vm->last_pfn) {
/* release mutex and lock in right order */
mutex_unlock(&vm->mutex);
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
/* and check again */
if (last_pfn > vm->last_pfn) {
/* grow va space 32M by 32M */
unsigned align = ((32 << 20) >> 12) - 1;
radeon_vm_free_pt(rdev, vm);
vm->last_pfn = (last_pfn + align) & ~align;
}
mutex_unlock(&rdev->vm_manager.lock);
}
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -864,6 +914,155 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
return result;
}
/**
* radeon_vm_update_pdes - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
*
* Allocates new page tables if necessary
* and updates the page directory (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0;
uint64_t pt_idx;
int r;
start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
/* walk over the address space and update the page directory */
for (pt_idx = start; pt_idx <= end; ++pt_idx) {
uint64_t pde, pt;
if (vm->page_tables[pt_idx])
continue;
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_tables[pt_idx],
RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
} else if (r) {
return r;
}
pde = vm->pd_gpu_addr + pt_idx * 8;
pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt)) {
if (count) {
radeon_asic_vm_set_page(rdev, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
count = 1;
last_pde = pde;
last_pt = pt;
} else {
++count;
}
}
if (count) {
radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
}
return 0;
}
/**
* radeon_vm_update_ptes - make sure that page tables are valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
* @flags: mapping flags
*
* Update the page tables in the range @start - @end (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
unsigned count = 0;
uint64_t addr;
start = start / RADEON_GPU_PAGE_SIZE;
end = end / RADEON_GPU_PAGE_SIZE;
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
unsigned nptes;
uint64_t pte;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = RADEON_VM_PTE_COUNT - (addr & mask);
pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
pte += (addr & mask) * 8;
if (((last_pte + 8 * count) != pte) ||
((count + nptes) > 1 << 11)) {
if (count) {
radeon_asic_vm_set_page(rdev, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
}
count = nptes;
last_pte = pte;
last_dst = dst;
} else {
count += nptes;
}
addr += nptes;
dst += nptes * RADEON_GPU_PAGE_SIZE;
}
if (count) {
radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
/**
* radeon_vm_bo_update_pte - map a bo into the vm page table
*
@@ -887,12 +1086,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_semaphore *sem = NULL;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t pe, addr;
uint64_t pfn;
uint64_t addr;
int r;
/* nothing to do if vm isn't bound */
if (vm->sa_bo == NULL)
if (vm->page_directory == NULL)
return 0;
bo_va = radeon_vm_bo_find(vm, bo);
@@ -939,25 +1137,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
}
}
/* estimate number of dw needed */
/* reserve space for 32-bit padding */
ndw = 32;
nptes = radeon_bo_ngpu_pages(bo);
pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
/* handle cases where a bo spans several pdes */
npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
(pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
/* estimate number of dw needed */
/* semaphore, fence and padding */
ndw = 32;
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 3;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 3;
/* reserve space for pte addresses */
ndw += nptes * 2;
/* reserve space for one header for every 2k dwords */
ndw += (npdes >> 11) * 3;
/* reserve space for pde addresses */
ndw += npdes * 2;
@@ -971,22 +1173,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_fence_note_sync(vm->fence, ridx);
}
/* update page table entries */
pe = vm->pd_gpu_addr;
pe += radeon_vm_directory_size(rdev);
pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
radeon_asic_vm_set_page(rdev, pe, addr, nptes,
RADEON_GPU_PAGE_SIZE, bo_va->flags);
/* update page directory entries */
addr = pe;
pe = vm->pd_gpu_addr;
pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
radeon_asic_vm_set_page(rdev, pe, addr, npdes,
RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
radeon_fence_unref(&vm->fence);
r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -997,6 +1191,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, vm->fence);
radeon_fence_unref(&vm->last_flush);
return 0;
}
@@ -1056,31 +1251,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Init @vm (cayman+).
* Map the IB pool and any other shared objects into the VM
* by default as it's used by all VMs.
* Returns 0 for success, error for failure.
* Init @vm fields (cayman+).
*/
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
int r;
vm->id = 0;
vm->fence = NULL;
vm->last_pfn = 0;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
return r;
}
/**
@@ -1102,17 +1281,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
/* remove all bo at this point non are busy any more because unbind
* waited for the last vm fence to signal
*/
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
if (!list_empty(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
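
Taken together, the hunks above implement the "allocate page tables on demand" change: radeon_vm_update_pdes() pulls a page table from the suballocator only when a page directory entry is first needed, and on -ENOMEM radeon_vm_evict() frees the least recently used VM's tables before retrying. A condensed, runnable illustration of that retry pattern follows; the pool and the LRU are reduced to counters and every name is hypothetical, not the driver's API.

#include <errno.h>
#include <stdio.h>

static int pool_free_slots = 1;   /* pretend the SA manager has room for one more table */
static int evictable_vms   = 2;   /* other VMs whose tables could still be freed */

static int sa_alloc(void)         /* stand-in for the suballocator */
{
    if (pool_free_slots == 0)
        return -ENOMEM;
    pool_free_slots--;
    return 0;
}

static int evict_lru_vm(void)     /* stand-in for evicting the LRU VM's tables */
{
    if (evictable_vms == 0)
        return -ENOMEM;           /* nothing left to evict but ourselves */
    evictable_vms--;
    pool_free_slots++;
    return 0;
}

static int alloc_pt_on_demand(void)
{
    int r;

    for (;;) {
        r = sa_alloc();
        if (r != -ENOMEM)
            return r;             /* success, or a hard error */
        r = evict_lru_vm();
        if (r)
            return r;             /* pool full and nothing evictable: give up */
    }
}

int main(void)
{
    printf("first table:  %d\n", alloc_pt_on_demand());   /* fits immediately */
    printf("second table: %d\n", alloc_pt_on_demand());   /* needs one eviction */
    return 0;
}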

@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
struct radeon_bo_va *bo_va;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
r = radeon_vm_init(rdev, &fpriv->vm);
radeon_vm_init(rdev, &fpriv->vm);
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_bo_va *bo_va;
int r;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_vm_bo_find(&fpriv->vm,
rdev->ring_tmp_bo.bo);
if (bo_va)
radeon_vm_bo_rmv(rdev, bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);

@@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
uint8_t level;
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
level = RADEON_MAX_BL_LEVEL;
else
level = bd->props.brightness;
if (pdata->negative)
level = RADEON_MAX_BL_LEVEL - level;
return level;
}
u8
radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
{
@@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
}
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
uint8_t level;
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
level = RADEON_MAX_BL_LEVEL;
else
level = bd->props.brightness;
if (pdata->negative)
level = RADEON_MAX_BL_LEVEL - level;
return level;
}
static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -991,11 +991,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
if (tmds) {
if (tmds->i2c_bus)
radeon_i2c_destroy(tmds->i2c_bus);
}
/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);

@@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
}
#endif
ring->ring[ring->wptr++] = v;

@@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-15 */
/* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),