drm/nv50: tidy up PCIEGART implementation
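Back the GART on nv50 with the shared channel VM instead of hand-written
PTEs in the SG ctxdma: a 512MiB aperture is reserved from chan_vm with
nouveau_vm_get(), and a new nv50_sgdma backend binds/unbinds pages through
nouveau_vm_map_sg() and nouveau_vm_unmap_at().  This removes the per-channel
vm_gart_pt page table, the vm_gart_base/vm_gart_size fields and the
nouveau_sgdma_pte() helper; GART users now read the base address from
gart_info.aper_base, and the pre-nv50 ctxdma paths lose their card_type
checks.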
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 5f6fdca570
commit b571fe21f5
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -425,7 +425,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
@@ -441,13 +440,13 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 					     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -531,12 +530,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;

 		if (new_mem->mem_type == TTM_PL_VRAM)
 			dst_offset = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}

 	ret = RING_SPACE(chan, 3);
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -248,7 +248,6 @@ struct nouveau_channel {
 	/* NV50 VM */
 	struct nouveau_vm     *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;

 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -684,6 +683,7 @@ struct drm_nouveau_private {
 		uint64_t aper_free;

 		struct nouveau_gpuobj *sg_ctxdma;
+		struct nouveau_vma vma;
 	} gart_info;

 	/* nv10-nv40 tiling regions */
@@ -709,8 +709,6 @@ struct drm_nouveau_private {

 	/* G8x/G9x virtual address space */
 	struct nouveau_vm *chan_vm;
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;

 	struct nvbios vbios;
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -433,7 +433,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
 		flags0 |= 0x00030000;
 		break;
 	case NV_MEM_TARGET_GART:
-		base += dev_priv->vm_gart_base;
+		base += dev_priv->gart_info.aper_base;
 	default:
 		flags0 &= ~0x00100000;
 		break;
@@ -801,7 +801,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			return ret;

 		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
-		chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
 	}

 	/* RAMHT */
@@ -889,7 +888,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)

 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);

 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
 	dma_addr_t *pages;
 	unsigned nr_pages;

-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 	}
 }

-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)

 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);

-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
-
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}

 	nvbe->bound = true;
 	return 0;
@@ -142,24 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;

-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}

 	nvbe->bound = false;
@@ -182,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
 	}
 }

+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate		= nouveau_sgdma_populate,
 	.clear			= nouveau_sgdma_clear,
@@ -190,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
 	.destroy		= nouveau_sgdma_destroy
 };

+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;

-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;

 	nvbe->dev = dev;

-	nvbe->backend.func = &nouveau_sgdma_backend;
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
@@ -226,11 +221,6 @@ nouveau_sgdma_init(struct drm_device *dev)

 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}

 	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
 				 NVOBJ_FLAG_ZERO_ALLOC |
@@ -240,7 +230,6 @@ nouveau_sgdma_init(struct drm_device *dev)
 		return ret;
 	}

-	if (dev_priv->card_type < NV_50) {
 	nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 		(1 << 12) /* PT present */ |
 		(0 << 13) /* PT *not* linear */ |
@@ -249,18 +238,23 @@ nouveau_sgdma_init(struct drm_device *dev)
 	nv_wo32(gpuobj, 4, aper_size - 1);
 	for (i = 2; i < 2 + (aper_size >> 12); i++)
 		nv_wo32(gpuobj, i * 4, 0x00000000);
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
-	}
-	dev_priv->engine.instmem.flush(dev);

-	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
+	}
+
+	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
 	return 0;
 }
@@ -270,6 +264,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }

 int
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -131,7 +131,6 @@ nv50_instmem_init(struct drm_device *dev)
 	struct nouveau_channel *chan;
 	struct nouveau_vm *vm;
 	int ret, i;
-	u64 nongart_o;
 	u32 tmp;

 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -216,15 +215,10 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);

-	/* Create shared channel VM, space is reserved for GART mappings at
-	 * the beginning of this address space, it's managed separately
-	 * because TTM makes life painful
+	/* Create shared channel VM, space is reserved at the beginning
+	 * to catch "NULL pointer" references
 	 */
-	dev_priv->vm_gart_base = 0x0020000000ULL;
-	dev_priv->vm_gart_size = 512 * 1024 * 1024;
-	nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-
-	ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o,
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
 			     29, 12, 16, &dev_priv->chan_vm);
 	if (ret)
 		return ret;