drm/radeon: Pass GART page flags to radeon_gart_set_page() explicitly
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a3eb06dbca
commit 77497f2735
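For illustration, a minimal standalone sketch (not part of the patch) of how the new RADEON_GART_PAGE_* flags map onto the R300-class PCIE GART PTE bits touched below. The flag and PTE values are the ones defined in this diff; r300_build_pte() and the demo main() are hypothetical helpers written for userspace so the mapping can be compiled and inspected on its own.

/* cc -o r300_pte_demo r300_pte_demo.c && ./r300_pte_demo */
#include <stdint.h>
#include <stdio.h>

/* GART page flags added to radeon.h by this patch */
#define RADEON_GART_PAGE_VALID  (1 << 0)
#define RADEON_GART_PAGE_READ   (1 << 1)
#define RADEON_GART_PAGE_WRITE  (1 << 2)
#define RADEON_GART_PAGE_SNOOP  (1 << 3)

/* R300 PCIE GART PTE bits used in r300.c */
#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

/* Mirror of the rv370_pcie_gart_set_page() encoding below: bits 39:8 of
 * the bus address, plus access bits selected from the page flags. */
static uint32_t r300_build_pte(uint64_t addr, uint32_t flags)
{
	uint32_t lo = (uint32_t)addr;
	uint32_t hi = (uint32_t)(addr >> 32);
	uint32_t pte = (lo >> 8) | ((hi & 0xff) << 24);

	if (flags & RADEON_GART_PAGE_READ)
		pte |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		pte |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		pte |= R300_PTE_UNSNOOPED;
	return pte;
}

int main(void)
{
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
			 RADEON_GART_PAGE_WRITE | RADEON_GART_PAGE_SNOOP;

	printf("snooped RW PTE for 0x0000001234567000: 0x%08x\n",
	       r300_build_pte(0x1234567000ULL, flags));
	printf("unsnooped RO PTE for the same page:    0x%08x\n",
	       r300_build_pte(0x1234567000ULL, RADEON_GART_PAGE_READ));
	return 0;
}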
@@ -682,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 }
 
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr)
+			    uint64_t addr, uint32_t flags)
 {
 	u32 *gtt = rdev->gart.ptr;
 	gtt[i] = cpu_to_le32(lower_32_bits(addr));
@@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	mb();
 }
 
+#define R300_PTE_UNSNOOPED (1 << 0)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
 void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-			      uint64_t addr)
+			      uint64_t addr, uint32_t flags)
 {
 	void __iomem *ptr = rdev->gart.ptr;
 
 	addr = (lower_32_bits(addr) >> 8) |
-		((upper_32_bits(addr) & 0xff) << 24) |
-		R300_PTE_WRITEABLE | R300_PTE_READABLE;
+		((upper_32_bits(addr) & 0xff) << 24);
+	if (flags & RADEON_GART_PAGE_READ)
+		addr |= R300_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		addr |= R300_PTE_WRITEABLE;
+	if (!(flags & RADEON_GART_PAGE_SNOOP))
+		addr |= R300_PTE_UNSNOOPED;
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -593,6 +593,12 @@ struct radeon_mc;
 #define RADEON_GPU_PAGE_SHIFT 12
 #define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
+#define RADEON_GART_PAGE_DUMMY  0
+#define RADEON_GART_PAGE_VALID  (1 << 0)
+#define RADEON_GART_PAGE_READ   (1 << 1)
+#define RADEON_GART_PAGE_WRITE  (1 << 2)
+#define RADEON_GART_PAGE_SNOOP  (1 << 3)
+
 struct radeon_gart {
 	dma_addr_t			table_addr;
 	struct radeon_bo		*robj;
@@ -617,7 +623,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 		     int pages, struct page **pagelist,
-		     dma_addr_t *dma_addr);
+		     dma_addr_t *dma_addr, uint32_t flags);
 
 
 /*
@@ -1784,7 +1790,7 @@ struct radeon_asic {
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
 		void (*set_page)(struct radeon_device *rdev, unsigned i,
-				 uint64_t addr);
+				 uint64_t addr, uint32_t flags);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
@@ -2745,7 +2751,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
@@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr);
+			    uint64_t addr, uint32_t flags);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -173,7 +173,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-				     uint64_t addr);
+				     uint64_t addr, uint32_t flags);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -209,7 +209,7 @@ extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr);
+			 uint64_t addr, uint32_t flags);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -233,7 +233,7 @@ void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr);
+			 uint64_t addr, uint32_t flags);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 		page_base = rdev->gart.pages_addr[p];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
 			if (rdev->gart.ptr) {
-				radeon_gart_set_page(rdev, t, page_base);
+				radeon_gart_set_page(rdev, t, page_base,
+						     RADEON_GART_PAGE_DUMMY);
 			}
 			page_base += RADEON_GPU_PAGE_SIZE;
 		}
@@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
  * @pages: number of pages to bind
  * @pagelist: pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
  *
  * Binds the requested pages to the gart page table
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint32_t flags)
 {
 	unsigned t;
 	unsigned p;
@@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 		if (rdev->gart.ptr) {
 			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base);
+				radeon_gart_set_page(rdev, t, page_base, flags);
 				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
@@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
 	struct radeon_ttm_tt *gtt = (void*)ttm;
+	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+		RADEON_GART_PAGE_WRITE;
 	int r;
 
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
@@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
 	}
-	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+	if (ttm->caching_state == tt_cached)
+		flags |= RADEON_GART_PAGE_SNOOP;
+	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+			     ttm->pages, gtt->ttm.dma_address, flags);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  ttm->num_pages, (unsigned)gtt->offset);
@@ -208,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
 	radeon_gart_table_ram_free(rdev);
 }
 
+#define RS400_PTE_UNSNOOPED (1 << 0)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)
 
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t addr, uint32_t flags)
 {
 	uint32_t entry;
 	u32 *gtt = rdev->gart.ptr;
 
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
-		((upper_32_bits(addr) & 0xff) << 4) |
-		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+		((upper_32_bits(addr) & 0xff) << 4);
+	if (flags & RADEON_GART_PAGE_READ)
+		entry |= RS400_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		entry |= RS400_PTE_WRITEABLE;
+	if (!(flags & RADEON_GART_PAGE_SNOOP))
+		entry |= RS400_PTE_UNSNOOPED;
 	entry = cpu_to_le32(entry);
 	gtt[i] = entry;
 }
@@ -625,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev)
 	radeon_gart_table_vram_free(rdev);
 }
 
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t addr, uint32_t flags)
 {
 	void __iomem *ptr = (void *)rdev->gart.ptr;
 
 	addr = addr & 0xFFFFFFFFFFFFF000ULL;
-	if (addr == rdev->dummy_page.addr)
-		addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
-	else
-		addr |= R600_PTE_GART;
+	addr |= R600_PTE_SYSTEM;
+	if (flags & RADEON_GART_PAGE_VALID)
+		addr |= R600_PTE_VALID;
+	if (flags & RADEON_GART_PAGE_READ)
+		addr |= R600_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		addr |= R600_PTE_WRITEABLE;
+	if (flags & RADEON_GART_PAGE_SNOOP)
+		addr |= R600_PTE_SNOOPED;
 	writeq(addr, ptr + (i * 8));
 }
 
@@ -796,7 +796,9 @@ struct drm_radeon_gem_info {
 	uint64_t vram_visible;
 };
 
-#define RADEON_GEM_NO_BACKING_STORE 1
+#define RADEON_GEM_NO_BACKING_STORE	(1 << 0)
+#define RADEON_GEM_GTT_UC		(1 << 1)
+#define RADEON_GEM_GTT_WC		(1 << 2)
 
 struct drm_radeon_gem_create {
 	uint64_t size;
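As a usage note, here is a minimal standalone sketch (not part of the patch) of how a caller now selects the flag word passed down through radeon_gart_bind() to radeon_gart_set_page(), following the radeon_ttm_backend_bind() hunk above: pages are bound valid and read/write, SNOOP is requested only for cache-coherent (cached) TTM pages, and unbinding uses RADEON_GART_PAGE_DUMMY. The choose_gart_flags() helper is hypothetical; the flag values come from this diff.

/* cc -o gart_flags_demo gart_flags_demo.c && ./gart_flags_demo */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RADEON_GART_PAGE_DUMMY  0
#define RADEON_GART_PAGE_VALID  (1 << 0)
#define RADEON_GART_PAGE_READ   (1 << 1)
#define RADEON_GART_PAGE_WRITE  (1 << 2)
#define RADEON_GART_PAGE_SNOOP  (1 << 3)

/* Pick GART page flags the way the TTM bind path above does. */
static uint32_t choose_gart_flags(bool cached)
{
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
			 RADEON_GART_PAGE_WRITE;

	if (cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	return flags;
}

int main(void)
{
	printf("cached pages:   flags = 0x%x\n", choose_gart_flags(true));
	printf("uncached pages: flags = 0x%x\n", choose_gart_flags(false));
	printf("unbind path:    flags = 0x%x (RADEON_GART_PAGE_DUMMY)\n",
	       (uint32_t)RADEON_GART_PAGE_DUMMY);
	return 0;
}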