drm/nouveau/fb: cosmetic changes

This is purely preparation for upcoming commits; there should be no
functional changes here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Author: Ben Skeggs <bskeggs@redhat.com>, 2015-08-20 14:54:06 +10:00
Commit: b1e4553cb1 (parent: 266f8b5ee6)
51 changed files with 649 additions and 663 deletions
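In short: `struct nvkm_fb` locals are renamed from "pfb" to "fb", the embedded `struct nvkm_subdev` member is renamed from "base" to "subdev", and several single-member `*_fb_priv` wrapper structs are flattened into the plain `nvkm_fb`/`gf100_fb`/`nv50_fb` types. A condensed before/after sketch of the pattern, drawn from the hunks below (illustrative, not a single contiguous hunk):

    -   struct nvkm_fb *pfb = nvkm_fb(object);
    -   ret = nvkm_subdev_init(&pfb->base);
    +   struct nvkm_fb *fb = nvkm_fb(object);
    +   ret = nvkm_subdev_init(&fb->subdev);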


@ -46,7 +46,7 @@ struct nvkm_fb_tile {
};
struct nvkm_fb {
struct nvkm_subdev base;
struct nvkm_subdev subdev;
bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);


@ -48,23 +48,23 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nvkm_fb_tile *tile = &pfb->tile.region[i];
struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
struct nvkm_engine *engine;
nouveau_fence_unref(&reg->fence);
if (tile->pitch)
pfb->tile.fini(pfb, i, tile);
fb->tile.fini(fb, i, tile);
if (pitch)
pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
fb->tile.init(fb, i, addr, size, pitch, flags, tile);
pfb->tile.prog(pfb, i, tile);
fb->tile.prog(fb, i, tile);
if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
if ((engine = nvkm_engine(fb, NVDEV_ENGINE_GR)))
engine->tile_prog(engine, i);
if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
if ((engine = nvkm_engine(fb, NVDEV_ENGINE_MPEG)))
engine->tile_prog(engine, i);
}
@ -105,18 +105,18 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
for (i = 0; i < pfb->tile.regions; i++) {
for (i = 0; i < fb->tile.regions; i++) {
tile = nv10_bo_get_tile_region(dev, i);
if (pitch && !found) {
found = tile;
continue;
} else if (tile && pfb->tile.region[i].pitch) {
} else if (tile && fb->tile.region[i].pitch) {
/* Kill an unused tile region. */
nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
}


@ -254,12 +254,12 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
if (!fb->memtype_valid(fb, req->info.tile_flags)) {
NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}


@ -33,8 +33,8 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_fb *pfb = nvxx_fb(&drm->device);
man->priv = pfb;
struct nvkm_fb *fb = nvxx_fb(&drm->device);
man->priv = fb;
return 0;
}
@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nvkm_fb *fb = nvxx_fb(&drm->device);
nvkm_mem_node_cleanup(mem->mm_node);
pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
fb->ram->put(fb, (struct nvkm_mem **)&mem->mm_node);
}
static int
@ -76,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_fb *pfb = nvxx_fb(&drm->device);
struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_mem *node;
u32 size_nc = 0;
@ -88,7 +88,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
ret = fb->ram->get(fb, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
@ -106,12 +106,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
struct nvkm_fb *pfb = man->priv;
struct nvkm_mm *mm = &pfb->vram;
struct nvkm_fb *fb = man->priv;
struct nvkm_mm *mm = &fb->vram;
struct nvkm_mm_node *r;
u32 total = 0, free = 0;
mutex_lock(&nv_subdev(pfb)->mutex);
mutex_lock(&nv_subdev(fb)->mutex);
list_for_each_entry(r, &mm->nodes, nl_entry) {
printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
prefix, r->type, ((u64)r->offset << 12),
@ -121,7 +121,7 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
if (!r->type)
free += r->length;
}
mutex_unlock(&nv_subdev(pfb)->mutex);
mutex_unlock(&nv_subdev(fb)->mutex);
printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
prefix, (u64)total << 12, (u64)free << 12);


@ -80,7 +80,7 @@ static int
nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
{
struct nvkm_device *device = nv_device(object);
struct nvkm_fb *pfb = nvkm_fb(device);
struct nvkm_fb *fb = nvkm_fb(device);
struct nvkm_instmem *imem = nvkm_instmem(device);
union {
struct nv_device_info_v0 v0;
@ -139,8 +139,8 @@ nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
args->v0.chipset = device->chipset;
args->v0.revision = device->chiprev;
if (pfb && pfb->ram)
args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
if (fb && fb->ram)
args->v0.ram_size = args->v0.ram_user = fb->ram->size;
else
args->v0.ram_size = args->v0.ram_user = 0;
if (imem && args->v0.ram_size > 0)


@ -64,7 +64,7 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
struct nvkm_instmem *instmem = nvkm_instmem(parent);
struct nvkm_client *client = nvkm_client(parent);
struct nvkm_device *device = nv_device(parent);
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_dmaobj *dmaobj;
void *data = *pdata;
u32 size = *psize;
@ -100,7 +100,7 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
break;
case NV_DMA_V0_TARGET_VRAM:
if (!client->super) {
if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
if (dmaobj->limit >= fb->ram->size - instmem->reserved)
return -EACCES;
if (device->card_type >= NV_50)
return -EACCES;


@ -295,7 +295,7 @@ static int
nv40_fifo_init(struct nvkm_object *object)
{
struct nv04_fifo_priv *priv = (void *)object;
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
int ret;
ret = nvkm_fifo_init(&priv->base);
@ -326,7 +326,7 @@ nv40_fifo_init(struct nvkm_object *object)
break;
default:
nv_wr32(priv, 0x002230, 0x00000000);
nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
nv_wr32(priv, 0x002220, ((fb->ram->size - 512 * 1024 +
priv->ramfc->addr) >> 16) |
0x00030000);
break;


@ -1249,7 +1249,7 @@ static int
nv10_gr_init(struct nvkm_object *object)
{
struct nvkm_engine *engine = nv_engine(object);
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
struct nv10_gr_priv *priv = (void *)engine;
int ret, i;
@ -1279,7 +1279,7 @@ nv10_gr_init(struct nvkm_object *object)
}
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->tile.regions; i++)
for (i = 0; i < fb->tile.regions; i++)
engine->tile_prog(engine, i);
nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);


@ -271,7 +271,7 @@ nv20_gr_init(struct nvkm_object *object)
{
struct nvkm_engine *engine = nv_engine(object);
struct nv20_gr_priv *priv = (void *)engine;
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
u32 tmp, vramsz;
int ret, i;
@ -324,7 +324,7 @@ nv20_gr_init(struct nvkm_object *object)
}
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->tile.regions; i++)
for (i = 0; i < fb->tile.regions; i++)
engine->tile_prog(engine, i);
nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));


@ -153,7 +153,7 @@ nv30_gr_init(struct nvkm_object *object)
{
struct nvkm_engine *engine = nv_engine(object);
struct nv20_gr_priv *priv = (void *)engine;
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
int ret, i;
ret = nvkm_gr_init(&priv->base);
@ -198,7 +198,7 @@ nv30_gr_init(struct nvkm_object *object)
nv_wr32(priv, 0x4000c0, 0x00000016);
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->tile.regions; i++)
for (i = 0; i < fb->tile.regions; i++)
engine->tile_prog(engine, i);
nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);


@ -366,7 +366,7 @@ static int
nv40_gr_init(struct nvkm_object *object)
{
struct nvkm_engine *engine = nv_engine(object);
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
struct nv40_gr_priv *priv = (void *)engine;
int ret, i, j;
u32 vramsz;
@ -470,7 +470,7 @@ nv40_gr_init(struct nvkm_object *object)
}
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->tile.regions; i++)
for (i = 0; i < fb->tile.regions; i++)
engine->tile_prog(engine, i);
/* begin RAM config */


@ -260,7 +260,7 @@ nv31_mpeg_init(struct nvkm_object *object)
{
struct nvkm_engine *engine = nv_engine(object);
struct nv31_mpeg_priv *priv = (void *)object;
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
int ret, i;
ret = nvkm_mpeg_init(&priv->base);
@ -271,7 +271,7 @@ nv31_mpeg_init(struct nvkm_object *object)
nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
for (i = 0; i < pfb->tile.regions; i++)
for (i = 0; i < fb->tile.regions; i++)
engine->tile_prog(engine, i);
/* PMPEG init */


@ -171,7 +171,7 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
struct nvkm_fb *pfb = nvkm_fb(clk);
struct nvkm_fb *fb = nvkm_fb(clk);
struct nvkm_pstate *pstate;
int ret, idx = 0;
@ -183,14 +183,14 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nv_debug(clk, "setting performance state %d\n", pstatei);
clk->pstate = pstatei;
if (pfb->ram && pfb->ram->calc) {
if (fb->ram && fb->ram->calc) {
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = pfb->ram->calc(pfb, khz);
ret = fb->ram->calc(fb, khz);
if (ret == 0)
ret = pfb->ram->prog(pfb);
ret = fb->ram->prog(fb);
} while (ret > 0);
pfb->ram->tidy(pfb);
fb->ram->tidy(fb);
}
return nvkm_cstate_prog(clk, pstate, 0);


@ -52,36 +52,36 @@ nvkm_fb_bios_memtype(struct nvkm_bios *bios)
int
_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_fb *pfb = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret;
if (pfb->ram) {
ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
if (fb->ram) {
ret = nv_ofuncs(fb->ram)->fini(nv_object(fb->ram), suspend);
if (ret && suspend)
return ret;
}
return nvkm_subdev_fini(&pfb->base, suspend);
return nvkm_subdev_fini(&fb->subdev, suspend);
}
int
_nvkm_fb_init(struct nvkm_object *object)
{
struct nvkm_fb *pfb = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret, i;
ret = nvkm_subdev_init(&pfb->base);
ret = nvkm_subdev_init(&fb->subdev);
if (ret)
return ret;
if (pfb->ram) {
ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
if (fb->ram) {
ret = nv_ofuncs(fb->ram)->init(nv_object(fb->ram));
if (ret)
return ret;
}
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
for (i = 0; i < fb->tile.regions; i++)
fb->tile.prog(fb, i, &fb->tile.region[i]);
return 0;
}
@ -89,19 +89,19 @@ _nvkm_fb_init(struct nvkm_object *object)
void
_nvkm_fb_dtor(struct nvkm_object *object)
{
struct nvkm_fb *pfb = (void *)object;
struct nvkm_fb *fb = (void *)object;
int i;
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
nvkm_mm_fini(&pfb->tags);
for (i = 0; i < fb->tile.regions; i++)
fb->tile.fini(fb, i, &fb->tile.region[i]);
nvkm_mm_fini(&fb->tags);
if (pfb->ram) {
nvkm_mm_fini(&pfb->vram);
nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
if (fb->ram) {
nvkm_mm_fini(&fb->vram);
nvkm_object_ref(NULL, (struct nvkm_object **)&fb->ram);
}
nvkm_subdev_destroy(&pfb->base);
nvkm_subdev_destroy(&fb->subdev);
}
int
@ -123,43 +123,43 @@ nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
[NV_MEM_TYPE_GDDR5 ] = "GDDR5",
};
struct nvkm_object *ram;
struct nvkm_fb *pfb;
struct nvkm_fb *fb;
int ret;
ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
length, pobject);
pfb = *pobject;
fb = *pobject;
if (ret)
return ret;
pfb->memtype_valid = impl->memtype;
fb->memtype_valid = impl->memtype;
if (!impl->ram)
return 0;
ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
ret = nvkm_object_ctor(nv_object(fb), NULL, impl->ram, NULL, 0, &ram);
if (ret) {
nv_fatal(pfb, "error detecting memory configuration!!\n");
nv_fatal(fb, "error detecting memory configuration!!\n");
return ret;
}
pfb->ram = (void *)ram;
fb->ram = (void *)ram;
if (!nvkm_mm_initialised(&pfb->vram)) {
ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
if (!nvkm_mm_initialised(&fb->vram)) {
ret = nvkm_mm_init(&fb->vram, 0, fb->ram->size >> 12, 1);
if (ret)
return ret;
}
if (!nvkm_mm_initialised(&pfb->tags)) {
ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ?
++pfb->ram->tags : 0, 1);
if (!nvkm_mm_initialised(&fb->tags)) {
ret = nvkm_mm_init(&fb->tags, 0, fb->ram->tags ?
++fb->ram->tags : 0, 1);
if (ret)
return ret;
}
nv_info(pfb, "RAM type: %s\n", name[pfb->ram->type]);
nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram->size >> 20));
nv_info(pfb, " ZCOMP: %d tags\n", pfb->ram->tags);
nv_info(fb, "RAM type: %s\n", name[fb->ram->type]);
nv_info(fb, "RAM size: %d MiB\n", (int)(fb->ram->size >> 20));
nv_info(fb, " ZCOMP: %d tags\n", fb->ram->tags);
return 0;
}


@ -26,7 +26,7 @@
extern const u8 gf100_pte_storage_type_map[256];
bool
gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
return likely((gf100_pte_storage_type_map[memtype] != 0xff));
@ -35,32 +35,28 @@ gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
static void
gf100_fb_intr(struct nvkm_subdev *subdev)
{
struct gf100_fb_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x000100);
if (intr & 0x08000000) {
nv_debug(priv, "PFFB intr\n");
intr &= ~0x08000000;
}
if (intr & 0x00002000) {
nv_debug(priv, "PBFB intr\n");
intr &= ~0x00002000;
}
struct gf100_fb *fb = (void *)subdev;
u32 intr = nv_rd32(fb, 0x000100);
if (intr & 0x08000000)
nv_debug(fb, "PFFB intr\n");
if (intr & 0x00002000)
nv_debug(fb, "PBFB intr\n");
}
int
gf100_fb_init(struct nvkm_object *object)
{
struct gf100_fb_priv *priv = (void *)object;
struct gf100_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(&fb->base);
if (ret)
return ret;
if (priv->r100c10_page)
nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
if (fb->r100c10_page)
nv_wr32(fb, 0x100c10, fb->r100c10 >> 8);
nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
nv_mask(fb, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
return 0;
}
@ -68,15 +64,15 @@ void
gf100_fb_dtor(struct nvkm_object *object)
{
struct nvkm_device *device = nv_device(object);
struct gf100_fb_priv *priv = (void *)object;
struct gf100_fb *fb = (void *)object;
if (priv->r100c10_page) {
dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
if (fb->r100c10_page) {
dma_unmap_page(nv_device_base(device), fb->r100c10, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(priv->r100c10_page);
__free_page(fb->r100c10_page);
}
nvkm_fb_destroy(&priv->base);
nvkm_fb_destroy(&fb->base);
}
int
@ -85,24 +81,24 @@ gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_object **pobject)
{
struct nvkm_device *device = nv_device(parent);
struct gf100_fb_priv *priv;
struct gf100_fb *fb;
int ret;
ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
ret = nvkm_fb_create(parent, engine, oclass, &fb);
*pobject = nv_object(fb);
if (ret)
return ret;
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c10_page) {
priv->r100c10 = dma_map_page(nv_device_base(device),
priv->r100c10_page, 0, PAGE_SIZE,
fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (fb->r100c10_page) {
fb->r100c10 = dma_map_page(nv_device_base(device),
fb->r100c10_page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(nv_device_base(device), priv->r100c10))
if (dma_mapping_error(nv_device_base(device), fb->r100c10))
return -EFAULT;
}
nv_subdev(priv)->intr = gf100_fb_intr;
nv_subdev(fb)->intr = gf100_fb_intr;
return 0;
}


@ -3,7 +3,7 @@
#include "priv.h"
#include "nv50.h"
struct gf100_fb_priv {
struct gf100_fb {
struct nvkm_fb base;
struct page *r100c10_page;
dma_addr_t r100c10;


@ -21,21 +21,17 @@
*/
#include "gf100.h"
struct gk20a_fb_priv {
struct nvkm_fb base;
};
static int
gk20a_fb_init(struct nvkm_object *object)
{
struct gk20a_fb_priv *priv = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(fb);
if (ret)
return ret;
nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
nv_mask(fb, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
return 0;
}
@ -44,11 +40,11 @@ gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct gk20a_fb_priv *priv;
struct nvkm_fb *fb;
int ret;
ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
ret = nvkm_fb_create(parent, engine, oclass, &fb);
*pobject = nv_object(fb);
if (ret)
return ret;


@ -25,7 +25,7 @@
#include "regsnv04.h"
bool
nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
if (!(tile_flags & 0xff00))
return true;
@ -36,10 +36,10 @@ nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
static int
nv04_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(fb);
if (ret)
return ret;
@ -47,7 +47,7 @@ nv04_fb_init(struct nvkm_object *object)
* nvidia reading PFB_CFG_0, then writing back its original value.
* (which was 0x701114 in this case)
*/
nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
nv_wr32(fb, NV04_PFB_CFG0, 0x1114);
return 0;
}
@ -57,19 +57,19 @@ nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_object **pobject)
{
struct nv04_fb_impl *impl = (void *)oclass;
struct nv04_fb_priv *priv;
struct nvkm_fb *fb;
int ret;
ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
ret = nvkm_fb_create(parent, engine, oclass, &fb);
*pobject = nv_object(fb);
if (ret)
return ret;
priv->base.tile.regions = impl->tile.regions;
priv->base.tile.init = impl->tile.init;
priv->base.tile.comp = impl->tile.comp;
priv->base.tile.fini = impl->tile.fini;
priv->base.tile.prog = impl->tile.prog;
fb->tile.regions = impl->tile.regions;
fb->tile.init = impl->tile.init;
fb->tile.comp = impl->tile.comp;
fb->tile.fini = impl->tile.fini;
fb->tile.prog = impl->tile.prog;
return 0;
}


@ -2,10 +2,6 @@
#define __NVKM_FB_NV04_H__
#include "priv.h"
struct nv04_fb_priv {
struct nvkm_fb base;
};
int nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void *, u32,
struct nvkm_object **);


@ -26,7 +26,7 @@
#include "nv04.h"
void
nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x80000000 | addr;
@ -35,7 +35,7 @@ nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
void
nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
@ -44,12 +44,12 @@ nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
}
void
nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
nv_rd32(pfb, 0x100240 + (i * 0x10));
nv_wr32(fb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(fb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(fb, 0x100240 + (i * 0x10), tile->addr);
nv_rd32(fb, 0x100240 + (i * 0x10));
}
struct nvkm_oclass *


@ -26,25 +26,25 @@
#include "nv04.h"
void
nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
if (flags & 4) {
pfb->tile.comp(pfb, i, size, flags, tile);
fb->tile.comp(fb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@ -56,23 +56,23 @@ nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
void
nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
nvkm_mm_free(&pfb->tags, &tile->tag);
nvkm_mm_free(&fb->tags, &tile->tag);
}
void
nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
nv_rd32(pfb, 0x100240 + (i * 0x10));
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
nv_wr32(fb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(fb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(fb, 0x100240 + (i * 0x10), tile->addr);
nv_rd32(fb, 0x100240 + (i * 0x10));
nv_wr32(fb, 0x100300 + (i * 0x04), tile->zcomp);
}
struct nvkm_oclass *


@ -26,12 +26,12 @@
#include "nv04.h"
static void
nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;


@ -26,15 +26,15 @@
#include "nv04.h"
void
nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
if (!(flags & 4)) {
tile->addr = (0 << 4);
} else {
if (pfb->tile.comp) /* z compression */
pfb->tile.comp(pfb, i, size, flags, tile);
if (fb->tile.comp) /* z compression */
fb->tile.comp(fb, i, size, flags, tile);
tile->addr = (1 << 4);
}
@ -45,12 +45,12 @@ nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
static void
nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@ -62,23 +62,23 @@ nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
static int
calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
calc_bias(struct nvkm_fb *fb, int k, int i, int j)
{
struct nvkm_device *device = nv_device(priv);
struct nvkm_device *device = nv_device(fb);
int b = (device->chipset > 0x30 ?
nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
nv_rd32(fb, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
0) & 0xf;
return 2 * (b & 0x8 ? b - 0x10 : b);
}
static int
calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
calc_ref(struct nvkm_fb *fb, int l, int k, int i)
{
int j, x = 0;
for (j = 0; j < 4; j++) {
int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
}
@ -90,10 +90,10 @@ int
nv30_fb_init(struct nvkm_object *object)
{
struct nvkm_device *device = nv_device(object);
struct nv04_fb_priv *priv = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret, i, j;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(fb);
if (ret)
return ret;
@ -103,16 +103,16 @@ nv30_fb_init(struct nvkm_object *object)
device->chipset == 0x35) {
/* Related to ROP count */
int n = (device->chipset == 0x31 ? 2 : 4);
int l = nv_rd32(priv, 0x1003d0);
int l = nv_rd32(fb, 0x1003d0);
for (i = 0; i < n; i++) {
for (j = 0; j < 3; j++)
nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
calc_ref(priv, l, 0, j));
nv_wr32(fb, 0x10037c + 0xc * i + 0x4 * j,
calc_ref(fb, l, 0, j));
for (j = 0; j < 2; j++)
nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
calc_ref(priv, l, 1, j));
nv_wr32(fb, 0x1003ac + 0x8 * i + 0x4 * j,
calc_ref(fb, l, 1, j));
}
}


@ -26,12 +26,12 @@
#include "nv04.h"
static void
nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);


@ -26,12 +26,12 @@
#include "nv04.h"
static void
nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);


@ -26,13 +26,13 @@
#include "nv04.h"
void
nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / pfb->ram->parts, 0x100);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@ -45,14 +45,14 @@ nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
static int
nv40_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(fb);
if (ret)
return ret;
nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
nv_mask(fb, 0x10033c, 0x00008000, 0x00000000);
return 0;
}


@ -26,26 +26,26 @@
#include "nv04.h"
void
nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
nv_rd32(pfb, 0x100600 + (i * 0x10));
nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
nv_wr32(fb, 0x100604 + (i * 0x10), tile->limit);
nv_wr32(fb, 0x100608 + (i * 0x10), tile->pitch);
nv_wr32(fb, 0x100600 + (i * 0x10), tile->addr);
nv_rd32(fb, 0x100600 + (i * 0x10));
nv_wr32(fb, 0x100700 + (i * 0x04), tile->zcomp);
}
int
nv41_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(fb);
if (ret)
return ret;
nv_wr32(priv, 0x100800, 0x00000001);
nv_wr32(fb, 0x100800, 0x00000001);
return 0;
}


@ -26,7 +26,7 @@
#include "nv04.h"
static void
nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001; /* mode = vram */
@ -36,26 +36,26 @@ nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
void
nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
nv_rd32(pfb, 0x100600 + (i * 0x10));
nv_wr32(fb, 0x100604 + (i * 0x10), tile->limit);
nv_wr32(fb, 0x100608 + (i * 0x10), tile->pitch);
nv_wr32(fb, 0x100600 + (i * 0x10), tile->addr);
nv_rd32(fb, 0x100600 + (i * 0x10));
}
int
nv44_fb_init(struct nvkm_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
struct nvkm_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(fb);
if (ret)
return ret;
nv_wr32(priv, 0x100850, 0x80000000);
nv_wr32(priv, 0x100800, 0x00000001);
nv_wr32(fb, 0x100850, 0x80000000);
nv_wr32(fb, 0x100800, 0x00000001);
return 0;
}


@ -26,7 +26,7 @@
#include "nv04.h"
void
nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */


@ -40,7 +40,7 @@ nv50_fb_memtype[0x80] = {
};
bool
nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype)
nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
{
return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
}
@ -146,23 +146,23 @@ nv50_fb_intr(struct nvkm_subdev *subdev)
{
struct nvkm_device *device = nv_device(subdev);
struct nvkm_engine *engine;
struct nv50_fb_priv *priv = (void *)subdev;
struct nv50_fb *fb = (void *)subdev;
const struct nvkm_enum *en, *cl;
struct nvkm_object *engctx = NULL;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
int i;
idx = nv_rd32(priv, 0x100c90);
idx = nv_rd32(fb, 0x100c90);
if (!(idx & 0x80000000))
return;
idx &= 0x00ffffff;
for (i = 0; i < 6; i++) {
nv_wr32(priv, 0x100c90, idx | i << 24);
trap[i] = nv_rd32(priv, 0x100c94);
nv_wr32(fb, 0x100c90, idx | i << 24);
trap[i] = nv_rd32(fb, 0x100c94);
}
nv_wr32(priv, 0x100c90, idx | 0x80000000);
nv_wr32(fb, 0x100c90, idx | 0x80000000);
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
@ -203,7 +203,7 @@ nv50_fb_intr(struct nvkm_subdev *subdev)
en = orig_en;
}
nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
nv_error(fb, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
(trap[5] & 0x00000100) ? "read" : "write",
trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
nvkm_client_name(engctx));
@ -243,26 +243,26 @@ nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_object **pobject)
{
struct nvkm_device *device = nv_device(parent);
struct nv50_fb_priv *priv;
struct nv50_fb *fb;
int ret;
ret = nvkm_fb_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
ret = nvkm_fb_create(parent, engine, oclass, &fb);
*pobject = nv_object(fb);
if (ret)
return ret;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c08_page) {
priv->r100c08 = dma_map_page(nv_device_base(device),
priv->r100c08_page, 0, PAGE_SIZE,
fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (fb->r100c08_page) {
fb->r100c08 = dma_map_page(nv_device_base(device),
fb->r100c08_page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(nv_device_base(device), priv->r100c08))
if (dma_mapping_error(nv_device_base(device), fb->r100c08))
return -EFAULT;
} else {
nv_warn(priv, "failed 0x100c08 page alloc\n");
nv_warn(fb, "failed 0x100c08 page alloc\n");
}
nv_subdev(priv)->intr = nv50_fb_intr;
nv_subdev(fb)->intr = nv50_fb_intr;
return 0;
}
@ -270,25 +270,25 @@ void
nv50_fb_dtor(struct nvkm_object *object)
{
struct nvkm_device *device = nv_device(object);
struct nv50_fb_priv *priv = (void *)object;
struct nv50_fb *fb = (void *)object;
if (priv->r100c08_page) {
dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
if (fb->r100c08_page) {
dma_unmap_page(nv_device_base(device), fb->r100c08, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page);
__free_page(fb->r100c08_page);
}
nvkm_fb_destroy(&priv->base);
nvkm_fb_destroy(&fb->base);
}
int
nv50_fb_init(struct nvkm_object *object)
{
struct nv50_fb_impl *impl = (void *)object->oclass;
struct nv50_fb_priv *priv = (void *)object;
struct nv50_fb *fb = (void *)object;
int ret;
ret = nvkm_fb_init(&priv->base);
ret = nvkm_fb_init(&fb->base);
if (ret)
return ret;
@ -296,11 +296,11 @@ nv50_fb_init(struct nvkm_object *object)
* scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
* cause IOMMU "read from address 0" errors (rh#561267)
*/
nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
nv_wr32(fb, 0x100c08, fb->r100c08 >> 8);
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
nv_wr32(priv, 0x100c90, impl->trap);
nv_wr32(fb, 0x100c90, impl->trap);
return 0;
}


@ -2,7 +2,7 @@
#define __NVKM_FB_NV50_H__
#include "priv.h"
struct nv50_fb_priv {
struct nv50_fb {
struct nvkm_fb base;
struct page *r100c08_page;
dma_addr_t r100c08;


@ -42,16 +42,16 @@ int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
#define nvkm_fb_create(p,e,c,d) \
nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
#define nvkm_fb_destroy(p) ({ \
struct nvkm_fb *pfb = (p); \
_nvkm_fb_dtor(nv_object(pfb)); \
struct nvkm_fb *_fb = (p); \
_nvkm_fb_dtor(nv_object(_fb)); \
})
#define nvkm_fb_init(p) ({ \
struct nvkm_fb *pfb = (p); \
_nvkm_fb_init(nv_object(pfb)); \
struct nvkm_fb *_fb = (p); \
_nvkm_fb_init(nv_object(_fb)); \
})
#define nvkm_fb_fini(p,s) ({ \
struct nvkm_fb *pfb = (p); \
_nvkm_fb_fini(nv_object(pfb), (s)); \
struct nvkm_fb *_fb = (p); \
_nvkm_fb_fini(nv_object(_fb), (s)); \
})
int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *,


@ -4,7 +4,7 @@
struct ramfuc {
struct nvkm_memx *memx;
struct nvkm_fb *pfb;
struct nvkm_fb *fb;
int sequence;
};
@ -54,9 +54,9 @@ ramfuc_reg(u32 addr)
}
static inline int
ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
ramfuc_init(struct ramfuc *ram, struct nvkm_fb *fb)
{
struct nvkm_pmu *pmu = nvkm_pmu(pfb);
struct nvkm_pmu *pmu = nvkm_pmu(fb);
int ret;
ret = nvkm_memx_init(pmu, &ram->memx);
@ -64,7 +64,7 @@ ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
return ret;
ram->sequence++;
ram->pfb = pfb;
ram->fb = fb;
return 0;
}
@ -72,9 +72,9 @@ static inline int
ramfuc_exec(struct ramfuc *ram, bool exec)
{
int ret = 0;
if (ram->pfb) {
if (ram->fb) {
ret = nvkm_memx_fini(&ram->memx, exec);
ram->pfb = NULL;
ram->fb = NULL;
}
return ret;
}
@ -83,7 +83,7 @@ static inline u32
ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
{
if (reg->sequence != ram->sequence)
reg->data = nv_rd32(ram->pfb, reg->addr);
reg->data = nv_rd32(ram->fb, reg->addr);
return reg->data;
}
@ -144,9 +144,9 @@ ramfuc_train(struct ramfuc *ram)
}
static inline int
ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize)
ramfuc_train_result(struct nvkm_fb *fb, u32 *result, u32 rsize)
{
struct nvkm_pmu *pmu = nvkm_pmu(pfb);
struct nvkm_pmu *pmu = nvkm_pmu(fb);
return nvkm_memx_train_result(pmu, result, rsize);
}


@ -107,9 +107,9 @@ static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
struct nvkm_fb *pfb = nvkm_fb(ram);
u32 part = nv_rd32(pfb, 0x022438), i;
u32 mask = nv_rd32(pfb, 0x022554);
struct nvkm_fb *fb = nvkm_fb(ram);
u32 part = nv_rd32(fb, 0x022438), i;
u32 mask = nv_rd32(fb, 0x022554);
u32 addr = 0x110974;
ram_wr32(fuc, 0x10f910, magic);
@ -123,11 +123,11 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
}
static int
gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
{
struct nvkm_clk *clk = nvkm_clk(pfb);
struct nvkm_bios *bios = nvkm_bios(pfb);
struct gf100_ram *ram = (void *)pfb->ram;
struct nvkm_clk *clk = nvkm_clk(fb);
struct nvkm_bios *bios = nvkm_bios(fb);
struct gf100_ram *ram = (void *)fb->ram;
struct gf100_ramfuc *fuc = &ram->fuc;
struct nvbios_ramcfg cfg;
u8 ver, cnt, len, strap;
@ -144,20 +144,20 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
&cnt, &ramcfg.size, &cfg);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nv_error(pfb, "invalid/missing rammap entry\n");
nv_error(fb, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
strap = nvbios_ramcfg_index(nv_subdev(pfb));
strap = nvbios_ramcfg_index(nv_subdev(fb));
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
nv_error(fb, "invalid ramcfg strap\n");
return -EINVAL;
}
ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
nv_error(pfb, "invalid/missing ramcfg entry\n");
nv_error(fb, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
@ -167,14 +167,14 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
&cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nv_error(pfb, "invalid/missing timing entry\n");
nv_error(fb, "invalid/missing timing entry\n");
return -EINVAL;
}
} else {
timing.data = 0;
}
ret = ram_init(fuc, pfb);
ret = ram_init(fuc, fb);
if (ret)
return ret;
@ -209,10 +209,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
if (mode == 1 && from == 0) {
/* calculate refpll */
ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
ret = gt215_pll_calc(nv_subdev(fb), &ram->refpll,
ram->mempll.refclk, &N1, NULL, &M1, &P);
if (ret <= 0) {
nv_error(pfb, "unable to calc refpll\n");
nv_error(fb, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
}
@ -224,10 +224,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
/* calculate mempll */
ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
ret = gt215_pll_calc(nv_subdev(fb), &ram->mempll, freq,
&N1, NULL, &M1, &P);
if (ret <= 0) {
nv_error(pfb, "unable to calc refpll\n");
nv_error(fb, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
}
@ -401,19 +401,19 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
static int
gf100_ram_prog(struct nvkm_fb *pfb)
gf100_ram_prog(struct nvkm_fb *fb)
{
struct nvkm_device *device = nv_device(pfb);
struct gf100_ram *ram = (void *)pfb->ram;
struct nvkm_device *device = nv_device(fb);
struct gf100_ram *ram = (void *)fb->ram;
struct gf100_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
static void
gf100_ram_tidy(struct nvkm_fb *pfb)
gf100_ram_tidy(struct nvkm_fb *fb)
{
struct gf100_ram *ram = (void *)pfb->ram;
struct gf100_ram *ram = (void *)fb->ram;
struct gf100_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, false);
}
@ -421,29 +421,29 @@ gf100_ram_tidy(struct nvkm_fb *pfb)
extern const u8 gf100_pte_storage_type_map[256];
void
gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
gf100_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
{
struct nvkm_ltc *ltc = nvkm_ltc(pfb);
struct nvkm_ltc *ltc = nvkm_ltc(fb);
struct nvkm_mem *mem = *pmem;
*pmem = NULL;
if (unlikely(mem == NULL))
return;
mutex_lock(&pfb->base.mutex);
mutex_lock(&fb->subdev.mutex);
if (mem->tag)
ltc->tags_free(ltc, &mem->tag);
__nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
__nv50_ram_put(fb, mem);
mutex_unlock(&fb->subdev.mutex);
kfree(mem);
}
int
gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nvkm_mem **pmem)
{
struct nvkm_mm *mm = &pfb->vram;
struct nvkm_mm *mm = &fb->vram;
struct nvkm_mm_node *r;
struct nvkm_mem *mem;
int type = (memtype & 0x0ff);
@ -464,9 +464,9 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
INIT_LIST_HEAD(&mem->regions);
mem->size = size;
mutex_lock(&pfb->base.mutex);
mutex_lock(&fb->subdev.mutex);
if (comp) {
struct nvkm_ltc *ltc = nvkm_ltc(pfb);
struct nvkm_ltc *ltc = nvkm_ltc(fb);
/* compression only works with lpages */
if (align == (1 << (17 - 12))) {
@ -485,15 +485,15 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
else
ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&pfb->base.mutex);
pfb->ram->put(pfb, &mem);
mutex_unlock(&fb->subdev.mutex);
fb->ram->put(fb, &mem);
return ret;
}
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&pfb->base.mutex);
mutex_unlock(&fb->subdev.mutex);
r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
@ -506,14 +506,14 @@ gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, u32 maskaddr, int size,
void **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_bios *bios = nvkm_bios(fb);
struct nvkm_ram *ram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
u32 parts = nv_rd32(pfb, 0x022438);
u32 pmask = nv_rd32(pfb, maskaddr);
u32 bsize = nv_rd32(pfb, 0x10f20c);
u32 parts = nv_rd32(fb, 0x022438);
u32 pmask = nv_rd32(fb, maskaddr);
u32 bsize = nv_rd32(fb, 0x10f20c);
u32 offset, length;
bool uniform = true;
int ret, part;
@ -523,23 +523,23 @@ gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
nv_debug(fb, "0x100800: 0x%08x\n", nv_rd32(fb, 0x100800));
nv_debug(fb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
ram->type = nvkm_fb_bios_memtype(bios);
ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
ram->ranks = (nv_rd32(fb, 0x10f200) & 0x00000004) ? 2 : 1;
/* read amount of vram attached to each memory controller */
for (part = 0; part < parts; part++) {
if (!(pmask & (1 << part))) {
u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
u32 psize = nv_rd32(fb, 0x11020c + (part * 0x1000));
if (psize != bsize) {
if (psize < bsize)
bsize = psize;
uniform = false;
}
nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
nv_debug(fb, "%d: mem_amount 0x%08x\n", part, psize);
ram->size += (u64)psize << 20;
}
}
@ -548,10 +548,10 @@ gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
if (uniform) {
offset = rsvd_head;
length = (ram->size >> 12) - rsvd_head - rsvd_tail;
ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
ret = nvkm_mm_init(&fb->vram, offset, length, 1);
} else {
/* otherwise, address lowest common amount from 0GiB */
ret = nvkm_mm_init(&pfb->vram, rsvd_head,
ret = nvkm_mm_init(&fb->vram, rsvd_head,
(bsize << 8) * parts - rsvd_head, 1);
if (ret)
return ret;
@ -560,9 +560,9 @@ gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
offset = (0x0200000000ULL >> 12) + (bsize << 8);
length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
ret = nvkm_mm_init(&fb->vram, offset, length, 1);
if (ret)
nvkm_mm_fini(&pfb->vram);
nvkm_mm_fini(&fb->vram);
}
if (ret)
@ -576,7 +576,7 @@ gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
static int
gf100_ram_init(struct nvkm_object *object)
{
struct nvkm_fb *pfb = (void *)object->parent;
struct nvkm_fb *fb = (void *)object->parent;
struct gf100_ram *ram = (void *)object;
int ret, i;
@ -601,16 +601,16 @@ gf100_ram_init(struct nvkm_object *object)
};
for (i = 0; i < 0x30; i++) {
nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
nv_wr32(pfb, 0x10f918, train1[i % 12]);
nv_wr32(pfb, 0x10f91c, train1[i % 12]);
nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
nv_wr32(pfb, 0x10f918, train1[i % 12]);
nv_wr32(pfb, 0x10f91c, train1[i % 12]);
nv_wr32(fb, 0x10f968, 0x00000000 | (i << 8));
nv_wr32(fb, 0x10f96c, 0x00000000 | (i << 8));
nv_wr32(fb, 0x10f920, 0x00000100 | train0[i % 12]);
nv_wr32(fb, 0x10f924, 0x00000100 | train0[i % 12]);
nv_wr32(fb, 0x10f918, train1[i % 12]);
nv_wr32(fb, 0x10f91c, train1[i % 12]);
nv_wr32(fb, 0x10f920, 0x00000000 | train0[i % 12]);
nv_wr32(fb, 0x10f924, 0x00000000 | train0[i % 12]);
nv_wr32(fb, 0x10f918, train1[i % 12]);
nv_wr32(fb, 0x10f91c, train1[i % 12]);
}
} break;
default:


@ -228,7 +228,7 @@ static void
gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
u32 _mask, u32 _data, u32 _copy)
{
struct gk104_fb_priv *priv = (void *)nvkm_fb(ram);
struct gk104_fb *fb = (void *)nvkm_fb(ram);
struct ramfuc *fuc = &ram->fuc.base;
u32 addr = 0x110000 + (reg->addr & 0xfff);
u32 mask = _mask | _copy;
@ -237,7 +237,7 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
for (i = 0; i < 16; i++, addr += 0x1000) {
if (ram->pnuts & (1 << i)) {
u32 prev = nv_rd32(priv, addr);
u32 prev = nv_rd32(fb, addr);
u32 next = (prev & ~mask) | data;
nvkm_memx_wr32(fuc->memx, addr, next);
}
@ -247,9 +247,9 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
static int
gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
gk104_ram_calc_gddr5(struct nvkm_fb *fb, u32 freq)
{
struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ram *ram = (void *)fb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
struct nvkm_ram_data *next = ram->base.next;
int vc = !next->bios.ramcfg_11_02_08;
@ -673,9 +673,9 @@ gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
******************************************************************************/
static int
gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
gk104_ram_calc_sddr3(struct nvkm_fb *fb, u32 freq)
{
struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ram *ram = (void *)fb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
@ -925,9 +925,9 @@ gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
******************************************************************************/
static int
gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
gk104_ram_calc_data(struct nvkm_fb *fb, u32 khz, struct nvkm_ram_data *data)
{
struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ram *ram = (void *)fb->ram;
struct nvkm_ram_data *cfg;
u32 mhz = khz / 1000;
@ -945,14 +945,14 @@ gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
}
static int
gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
gk104_ram_calc_xits(struct nvkm_fb *fb, struct nvkm_ram_data *next)
{
struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ram *ram = (void *)fb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
int refclk, i;
int ret;
ret = ram_init(fuc, pfb);
ret = ram_init(fuc, fb);
if (ret)
return ret;
@ -972,11 +972,11 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
refclk = fuc->mempll.refclk;
/* calculate refpll coefficients */
ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
ret = gt215_pll_calc(nv_subdev(fb), &fuc->refpll, refclk, &ram->N1,
&ram->fN1, &ram->M1, &ram->P1);
fuc->mempll.refclk = ret;
if (ret <= 0) {
nv_error(pfb, "unable to calc refpll\n");
nv_error(fb, "unable to calc refpll\n");
return -EINVAL;
}
@ -989,10 +989,10 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
fuc->mempll.min_p = 1;
fuc->mempll.max_p = 2;
ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
ret = gt215_pll_calc(nv_subdev(fb), &fuc->mempll, next->freq,
&ram->N2, NULL, &ram->M2, &ram->P2);
if (ret <= 0) {
nv_error(pfb, "unable to calc mempll\n");
nv_error(fb, "unable to calc mempll\n");
return -EINVAL;
}
}
@ -1007,12 +1007,12 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
case NV_MEM_TYPE_DDR3:
ret = nvkm_sddr3_calc(&ram->base);
if (ret == 0)
ret = gk104_ram_calc_sddr3(pfb, next->freq);
ret = gk104_ram_calc_sddr3(fb, next->freq);
break;
case NV_MEM_TYPE_GDDR5:
ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
if (ret == 0)
ret = gk104_ram_calc_gddr5(pfb, next->freq);
ret = gk104_ram_calc_gddr5(fb, next->freq);
break;
default:
ret = -ENOSYS;
@ -1023,21 +1023,21 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
}
static int
gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
gk104_ram_calc(struct nvkm_fb *fb, u32 freq)
{
struct nvkm_clk *clk = nvkm_clk(pfb);
struct gk104_ram *ram = (void *)pfb->ram;
struct nvkm_clk *clk = nvkm_clk(fb);
struct gk104_ram *ram = (void *)fb->ram;
struct nvkm_ram_data *xits = &ram->base.xition;
struct nvkm_ram_data *copy;
int ret;
if (ram->base.next == NULL) {
ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
ret = gk104_ram_calc_data(fb, clk->read(clk, nv_clk_src_mem),
&ram->base.former);
if (ret)
return ret;
ret = gk104_ram_calc_data(pfb, freq, &ram->base.target);
ret = gk104_ram_calc_data(fb, freq, &ram->base.target);
if (ret)
return ret;
@ -1061,13 +1061,13 @@ gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram->base.next = &ram->base.target;
}
return gk104_ram_calc_xits(pfb, ram->base.next);
return gk104_ram_calc_xits(fb, ram->base.next);
}
static void
gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
gk104_ram_prog_0(struct nvkm_fb *fb, u32 freq)
{
struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ram *ram = (void *)fb->ram;
struct nvkm_ram_data *cfg;
u32 mhz = freq / 1000;
u32 mask, data;
@ -1089,31 +1089,31 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
data |= cfg->bios.rammap_11_09_01ff;
mask |= 0x000001ff;
}
nv_mask(pfb, 0x10f468, mask, data);
nv_mask(fb, 0x10f468, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
data |= cfg->bios.rammap_11_0a_0400;
mask |= 0x00000001;
}
nv_mask(pfb, 0x10f420, mask, data);
nv_mask(fb, 0x10f420, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
data |= cfg->bios.rammap_11_0a_0800;
mask |= 0x00000001;
}
nv_mask(pfb, 0x10f430, mask, data);
nv_mask(fb, 0x10f430, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
data |= cfg->bios.rammap_11_0b_01f0;
mask |= 0x0000001f;
}
nv_mask(pfb, 0x10f400, mask, data);
nv_mask(fb, 0x10f400, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
data |= cfg->bios.rammap_11_0b_0200 << 9;
mask |= 0x00000200;
}
nv_mask(pfb, 0x10f410, mask, data);
nv_mask(fb, 0x10f410, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
data |= cfg->bios.rammap_11_0d << 16;
@ -1123,7 +1123,7 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
data |= cfg->bios.rammap_11_0f << 8;
mask |= 0x0000ff00;
}
nv_mask(pfb, 0x10f440, mask, data);
nv_mask(fb, 0x10f440, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
data |= cfg->bios.rammap_11_0e << 8;
@ -1137,14 +1137,14 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
data |= cfg->bios.rammap_11_0b_0400 << 5;
mask |= 0x00000020;
}
nv_mask(pfb, 0x10f444, mask, data);
nv_mask(fb, 0x10f444, mask, data);
}
static int
gk104_ram_prog(struct nvkm_fb *pfb)
gk104_ram_prog(struct nvkm_fb *fb)
{
struct nvkm_device *device = nv_device(pfb);
struct gk104_ram *ram = (void *)pfb->ram;
struct nvkm_device *device = nv_device(fb);
struct gk104_ram *ram = (void *)fb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
struct nvkm_ram_data *next = ram->base.next;
@ -1153,17 +1153,17 @@ gk104_ram_prog(struct nvkm_fb *pfb)
return (ram->base.next == &ram->base.xition);
}
gk104_ram_prog_0(pfb, 1000);
gk104_ram_prog_0(fb, 1000);
ram_exec(fuc, true);
gk104_ram_prog_0(pfb, next->freq);
gk104_ram_prog_0(fb, next->freq);
return (ram->base.next == &ram->base.xition);
}
static void
gk104_ram_tidy(struct nvkm_fb *pfb)
gk104_ram_tidy(struct nvkm_fb *fb)
{
struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ram *ram = (void *)fb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
ram->base.next = NULL;
ram_exec(fuc, false);
@ -1182,10 +1182,10 @@ struct gk104_ram_train {
};
static int
gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
gk104_ram_train_type(struct nvkm_fb *fb, int i, u8 ramcfg,
struct gk104_ram_train *train)
{
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nvkm_bios *bios = nvkm_bios(fb);
struct nvbios_M0205E M0205E;
struct nvbios_M0205S M0205S;
struct nvbios_M0209E M0209E;
@ -1243,33 +1243,33 @@ gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
}
static int
gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
gk104_ram_train_init_0(struct nvkm_fb *fb, struct gk104_ram_train *train)
{
int i, j;
if ((train->mask & 0x03d3) != 0x03d3) {
nv_warn(pfb, "missing link training data\n");
nv_warn(fb, "missing link training data\n");
return -EINVAL;
}
for (i = 0; i < 0x30; i++) {
for (j = 0; j < 8; j += 4) {
nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8));
nv_wr32(pfb, 0x10f920 + j, 0x00000000 |
nv_wr32(fb, 0x10f968 + j, 0x00000000 | (i << 8));
nv_wr32(fb, 0x10f920 + j, 0x00000000 |
train->type08.data[i] << 4 |
train->type06.data[i]);
nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]);
nv_wr32(pfb, 0x10f920 + j, 0x00000100 |
nv_wr32(fb, 0x10f918 + j, train->type00.data[i]);
nv_wr32(fb, 0x10f920 + j, 0x00000100 |
train->type09.data[i] << 4 |
train->type07.data[i]);
nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]);
nv_wr32(fb, 0x10f918 + j, train->type01.data[i]);
}
}
for (j = 0; j < 8; j += 4) {
for (i = 0; i < 0x100; i++) {
nv_wr32(pfb, 0x10f968 + j, i);
nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]);
nv_wr32(fb, 0x10f968 + j, i);
nv_wr32(fb, 0x10f900 + j, train->type04.data[i]);
}
}
@ -1277,23 +1277,24 @@ gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
}
static int
gk104_ram_train_init(struct nvkm_fb *pfb)
gk104_ram_train_init(struct nvkm_fb *fb)
{
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(fb));
struct gk104_ram_train *train;
int ret = -ENOMEM, i;
int ret, i;
if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
for (i = 0; i < 0x100; i++) {
ret = gk104_ram_train_type(pfb, i, ramcfg, train);
if (ret && ret != -ENOENT)
break;
}
if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
return -ENOMEM;
for (i = 0; i < 0x100; i++) {
ret = gk104_ram_train_type(fb, i, ramcfg, train);
if (ret && ret != -ENOENT)
break;
}
switch (pfb->ram->type) {
switch (fb->ram->type) {
case NV_MEM_TYPE_GDDR5:
ret = gk104_ram_train_init_0(pfb, train);
ret = gk104_ram_train_init_0(fb, train);
break;
default:
ret = 0;
@ -1307,9 +1308,9 @@ gk104_ram_train_init(struct nvkm_fb *pfb)
int
gk104_ram_init(struct nvkm_object *object)
{
struct nvkm_fb *pfb = (void *)object->parent;
struct nvkm_fb *fb = (void *)object->parent;
struct gk104_ram *ram = (void *)object;
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nvkm_bios *bios = nvkm_bios(fb);
u8 ver, hdr, cnt, len, snr, ssz;
u32 data, save;
int ret, i;
@ -1335,31 +1336,31 @@ gk104_ram_init(struct nvkm_object *object)
cnt = nv_ro08(bios, data + 0x14); /* guess at count */
data = nv_ro32(bios, data + 0x10); /* guess u32... */
save = nv_rd32(pfb, 0x10f65c) & 0x000000f0;
save = nv_rd32(fb, 0x10f65c) & 0x000000f0;
for (i = 0; i < cnt; i++, data += 4) {
if (i != save >> 4) {
nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
nv_mask(fb, 0x10f65c, 0x000000f0, i << 4);
nvbios_exec(&(struct nvbios_init) {
.subdev = nv_subdev(pfb),
.subdev = nv_subdev(fb),
.bios = bios,
.offset = nv_ro32(bios, data),
.execute = 1,
});
}
}
nv_mask(pfb, 0x10f65c, 0x000000f0, save);
nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
nv_wr32(pfb, 0x10ecc0, 0xffffffff);
nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
nv_mask(fb, 0x10f65c, 0x000000f0, save);
nv_mask(fb, 0x10f584, 0x11000000, 0x00000000);
nv_wr32(fb, 0x10ecc0, 0xffffffff);
nv_mask(fb, 0x10f160, 0x00000010, 0x00000010);
return gk104_ram_train_init(pfb);
return gk104_ram_train_init(fb);
}
static int
gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
{
struct nvkm_fb *pfb = (void *)nv_object(ram)->parent;
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nvkm_fb *fb = (void *)nv_object(ram)->parent;
struct nvkm_bios *bios = nvkm_bios(fb);
struct nvkm_ram_data *cfg;
struct nvbios_ramcfg *d = &ram->diff;
struct nvbios_ramcfg *p, *n;
@ -1443,13 +1444,13 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nvkm_gpio *gpio = nvkm_gpio(pfb);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_bios *bios = nvkm_bios(fb);
struct nvkm_gpio *gpio = nvkm_gpio(fb);
struct dcb_gpio_func func;
struct gk104_ram *ram;
int ret, i;
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(fb));
u32 tmp;
ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
@ -1467,7 +1468,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ram->base.tidy = gk104_ram_tidy;
break;
default:
nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
nv_warn(fb, "reclocking of this RAM type is unsupported\n");
break;
}
@ -1476,12 +1477,12 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
* already without having to treat some of them differently to
* the others....
*/
ram->parts = nv_rd32(pfb, 0x022438);
ram->pmask = nv_rd32(pfb, 0x022554);
ram->parts = nv_rd32(fb, 0x022438);
ram->pmask = nv_rd32(fb, 0x022554);
ram->pnuts = 0;
for (i = 0, tmp = 0; i < ram->parts; i++) {
if (!(ram->pmask & (1 << i))) {
u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
u32 cfg1 = nv_rd32(fb, 0x110204 + (i * 0x1000));
if (tmp && tmp != cfg1) {
ram->pnuts |= (1 << i);
continue;
@ -1504,7 +1505,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
for (i = 0; !ret; i++) {
ret = gk104_ram_ctor_data(ram, ramcfg, i);
if (ret && ret != -ENOENT) {
nv_error(pfb, "failed to parse ramcfg data\n");
nv_error(fb, "failed to parse ramcfg data\n");
return ret;
}
}
@ -1512,13 +1513,13 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
/* parse bios data for both pll's */
ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
if (ret) {
nv_error(pfb, "mclk refpll data not found\n");
nv_error(fb, "mclk refpll data not found\n");
return ret;
}
ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
if (ret) {
nv_error(pfb, "mclk pll data not found\n");
nv_error(fb, "mclk pll data not found\n");
return ret;
}


@ -23,16 +23,12 @@
*/
#include "gf100.h"
struct gm107_ram {
struct nvkm_ram base;
};
static int
gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct gm107_ram *ram;
struct nvkm_ram *ram;
int ret;
ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);


@ -153,13 +153,13 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
* Link training for (at least) DDR3
*/
int
gt215_link_train(struct nvkm_fb *pfb)
gt215_link_train(struct nvkm_fb *fb)
{
struct nvkm_bios *bios = nvkm_bios(pfb);
struct gt215_ram *ram = (void *)pfb->ram;
struct nvkm_clk *clk = nvkm_clk(pfb);
struct nvkm_bios *bios = nvkm_bios(fb);
struct gt215_ram *ram = (void *)fb->ram;
struct nvkm_clk *clk = nvkm_clk(fb);
struct gt215_ltrain *train = &ram->ltrain;
struct nvkm_device *device = nv_device(pfb);
struct nvkm_device *device = nv_device(fb);
struct gt215_ramfuc *fuc = &ram->fuc;
u32 *result, r1700;
int ret, i;
@ -181,8 +181,10 @@ gt215_link_train(struct nvkm_fb *pfb)
/* Clock speeds for training and back */
nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
if (M0205T.freq == 0)
if (M0205T.freq == 0) {
kfree(result);
return -ENOENT;
}
clk_current = clk->read(clk, nv_clk_src_mem);
@ -191,17 +193,17 @@ gt215_link_train(struct nvkm_fb *pfb)
goto out;
/* First: clock up/down */
ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
ret = ram->base.calc(fb, (u32) M0205T.freq * 1000);
if (ret)
goto out;
/* Do this *after* calc, eliminates write in script */
nv_wr32(pfb, 0x111400, 0x00000000);
nv_wr32(fb, 0x111400, 0x00000000);
/* XXX: Magic writes that improve train reliability? */
nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
nv_wr32(pfb, 0x100c04, 0x00000400);
nv_mask(fb, 0x100674, 0x0000ffff, 0x00000000);
nv_mask(fb, 0x1005e4, 0x0000ffff, 0x00000000);
nv_mask(fb, 0x100b0c, 0x000000ff, 0x00000000);
nv_wr32(fb, 0x100c04, 0x00000400);
/* Now the training script */
r1700 = ram_rd32(fuc, 0x001700);
@ -234,21 +236,21 @@ gt215_link_train(struct nvkm_fb *pfb)
ram_exec(fuc, true);
ram->base.calc(pfb, clk_current);
ram->base.calc(fb, clk_current);
ram_exec(fuc, true);
/* Post-processing, avoids flicker */
nv_mask(pfb, 0x616308, 0x10, 0x10);
nv_mask(pfb, 0x616b08, 0x10, 0x10);
nv_mask(fb, 0x616308, 0x10, 0x10);
nv_mask(fb, 0x616b08, 0x10, 0x10);
gt215_clk_post(clk, f);
ram_train_result(pfb, result, 64);
ram_train_result(fb, result, 64);
for (i = 0; i < 64; i++)
nv_debug(pfb, "Train: %08x", result[i]);
nv_debug(fb, "Train: %08x", result[i]);
gt215_link_train_calc(result, train);
nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
nv_debug(fb, "Train: %08x %08x %08x", train->r_100720,
train->r_1111e0, train->r_111400);
kfree(result);
@ -264,11 +266,12 @@ out:
train->state = NVA3_TRAIN_UNSUPPORTED;
gt215_clk_post(clk, f);
kfree(result);
return ret;
}
int
gt215_link_train_init(struct nvkm_fb *pfb)
gt215_link_train_init(struct nvkm_fb *fb)
{
static const u32 pattern[16] = {
0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@ -276,8 +279,8 @@ gt215_link_train_init(struct nvkm_fb *pfb)
0x33333333, 0x55555555, 0x77777777, 0x66666666,
0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
};
struct nvkm_bios *bios = nvkm_bios(pfb);
struct gt215_ram *ram = (void *)pfb->ram;
struct nvkm_bios *bios = nvkm_bios(fb);
struct gt215_ram *ram = (void *)fb->ram;
struct gt215_ltrain *train = &ram->ltrain;
struct nvkm_mem *mem;
struct nvbios_M0205E M0205E;
@ -297,48 +300,48 @@ gt215_link_train_init(struct nvkm_fb *pfb)
train->state = NVA3_TRAIN_ONCE;
ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
ret = fb->ram->get(fb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
if (ret)
return ret;
mem = ram->ltrain.mem;
nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
nv_wr32(pfb, 0x1005a8, 0x0000ffff);
nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
nv_wr32(fb, 0x100538, 0x10000000 | (mem->offset >> 16));
nv_wr32(fb, 0x1005a8, 0x0000ffff);
nv_mask(fb, 0x10f800, 0x00000001, 0x00000001);
for (i = 0; i < 0x30; i++) {
nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
nv_wr32(pfb, 0x10f900, pattern[i % 16]);
nv_wr32(fb, 0x10f8c0, (i << 8) | i);
nv_wr32(fb, 0x10f900, pattern[i % 16]);
}
for (i = 0; i < 0x30; i++) {
nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
nv_wr32(pfb, 0x10f920, pattern[i % 16]);
nv_wr32(fb, 0x10f8e0, (i << 8) | i);
nv_wr32(fb, 0x10f920, pattern[i % 16]);
}
/* And upload the pattern */
r001700 = nv_rd32(pfb, 0x1700);
nv_wr32(pfb, 0x1700, mem->offset >> 16);
r001700 = nv_rd32(fb, 0x1700);
nv_wr32(fb, 0x1700, mem->offset >> 16);
for (i = 0; i < 16; i++)
nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
nv_wr32(fb, 0x700000 + (i << 2), pattern[i]);
for (i = 0; i < 16; i++)
nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
nv_wr32(pfb, 0x1700, r001700);
nv_wr32(fb, 0x700100 + (i << 2), pattern[i]);
nv_wr32(fb, 0x1700, r001700);
train->r_100720 = nv_rd32(pfb, 0x100720);
train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
train->r_111400 = nv_rd32(pfb, 0x111400);
train->r_100720 = nv_rd32(fb, 0x100720);
train->r_1111e0 = nv_rd32(fb, 0x1111e0);
train->r_111400 = nv_rd32(fb, 0x111400);
return 0;
}
void
gt215_link_train_fini(struct nvkm_fb *pfb)
gt215_link_train_fini(struct nvkm_fb *fb)
{
struct gt215_ram *ram = (void *)pfb->ram;
struct gt215_ram *ram = (void *)fb->ram;
if (ram->ltrain.mem)
pfb->ram->put(pfb, &ram->ltrain.mem);
fb->ram->put(fb, &ram->ltrain.mem);
}
/*
@ -346,17 +349,17 @@ gt215_link_train_fini(struct nvkm_fb *pfb)
*/
#define T(t) cfg->timing_10_##t
static int
gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
gt215_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
{
struct gt215_ram *ram = (void *)pfb->ram;
struct gt215_ram *ram = (void *)fb->ram;
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
int tUNK_base, tUNK_40_0, prevCL;
u32 cur2, cur3, cur7, cur8;
cur2 = nv_rd32(pfb, 0x100228);
cur3 = nv_rd32(pfb, 0x10022c);
cur7 = nv_rd32(pfb, 0x10023c);
cur8 = nv_rd32(pfb, 0x100240);
cur2 = nv_rd32(fb, 0x100228);
cur3 = nv_rd32(fb, 0x10022c);
cur7 = nv_rd32(fb, 0x10023c);
cur8 = nv_rd32(fb, 0x100240);
switch ((!T(CWL)) * ram->base.type) {
@ -411,11 +414,11 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
break;
}
nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
nv_debug(fb, "Entry: 220: %08x %08x %08x %08x\n",
timing[0], timing[1], timing[2], timing[3]);
nv_debug(pfb, " 230: %08x %08x %08x %08x\n",
nv_debug(fb, " 230: %08x %08x %08x %08x\n",
timing[4], timing[5], timing[6], timing[7]);
nv_debug(pfb, " 240: %08x\n", timing[8]);
nv_debug(fb, " 240: %08x\n", timing[8]);
return 0;
}
#undef T
@ -465,7 +468,7 @@ gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
static void
gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
{
struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb);
struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.fb);
struct dcb_gpio_func func;
u32 reg, sh, gpio_val;
int ret;
@ -486,10 +489,10 @@ gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
}
static int
gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
{
struct nvkm_bios *bios = nvkm_bios(pfb);
struct gt215_ram *ram = (void *)pfb->ram;
struct nvkm_bios *bios = nvkm_bios(fb);
struct gt215_ram *ram = (void *)fb->ram;
struct gt215_ramfuc *fuc = &ram->fuc;
struct gt215_ltrain *train = &ram->ltrain;
struct gt215_clk_info mclk;
@ -507,28 +510,27 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram->base.next = next;
if (ram->ltrain.state == NVA3_TRAIN_ONCE)
gt215_link_train(pfb);
gt215_link_train(fb);
/* lookup memory config data relevant to the target frequency */
i = 0;
data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
&next->bios);
if (!data || ver != 0x10 || hdr < 0x05) {
nv_error(pfb, "invalid/missing rammap entry\n");
nv_error(fb, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
strap = nvbios_ramcfg_index(nv_subdev(pfb));
strap = nvbios_ramcfg_index(nv_subdev(fb));
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
nv_error(fb, "invalid ramcfg strap\n");
return -EINVAL;
}
data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
&ver, &hdr, &next->bios);
if (!data || ver != 0x10 || hdr < 0x09) {
nv_error(pfb, "invalid/missing ramcfg entry\n");
nv_error(fb, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
@ -538,20 +540,20 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
&ver, &hdr, &cnt, &len,
&next->bios);
if (!data || ver != 0x10 || hdr < 0x17) {
nv_error(pfb, "invalid/missing timing entry\n");
nv_error(fb, "invalid/missing timing entry\n");
return -EINVAL;
}
}
ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk);
ret = gt215_pll_info(nvkm_clk(fb), 0x12, 0x4000, freq, &mclk);
if (ret < 0) {
nv_error(pfb, "failed mclk calculation\n");
nv_error(fb, "failed mclk calculation\n");
return ret;
}
gt215_ram_timing_calc(pfb, timing);
gt215_ram_timing_calc(fb, timing);
ret = ram_init(fuc, pfb);
ret = ram_init(fuc, fb);
if (ret)
return ret;
@ -649,7 +651,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_wr32(fuc, 0x1002dc, 0x00000001);
ram_nsec(fuc, 2000);
if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
if (nv_device(fb)->chipset == 0xa3 && freq <= 500000)
ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
/* Fiddle with clocks */
@ -707,7 +709,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
}
if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
if (nv_device(fb)->chipset == 0xa3 && freq > 500000) {
ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
}
@ -752,7 +754,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
if (next->bios.ramcfg_10_02_04) {
switch (ram->base.type) {
case NV_MEM_TYPE_DDR3:
if (nv_device(pfb)->chipset != 0xa8)
if (nv_device(fb)->chipset != 0xa8)
r111100 |= 0x00000004;
/* no break */
case NV_MEM_TYPE_DDR2:
@ -768,7 +770,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
unk714 |= 0x00000010;
break;
case NV_MEM_TYPE_DDR3:
if (nv_device(pfb)->chipset == 0xa8) {
if (nv_device(fb)->chipset == 0xa8) {
r111100 |= 0x08000000;
} else {
r111100 &= ~0x00000004;
@ -854,24 +856,24 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
static int
gt215_ram_prog(struct nvkm_fb *pfb)
gt215_ram_prog(struct nvkm_fb *fb)
{
struct nvkm_device *device = nv_device(pfb);
struct gt215_ram *ram = (void *)pfb->ram;
struct nvkm_device *device = nv_device(fb);
struct gt215_ram *ram = (void *)fb->ram;
struct gt215_ramfuc *fuc = &ram->fuc;
bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
if (exec) {
nv_mask(pfb, 0x001534, 0x2, 0x2);
nv_mask(fb, 0x001534, 0x2, 0x2);
ram_exec(fuc, true);
/* Post-processing, avoids flicker */
nv_mask(pfb, 0x002504, 0x1, 0x0);
nv_mask(pfb, 0x001534, 0x2, 0x0);
nv_mask(fb, 0x002504, 0x1, 0x0);
nv_mask(fb, 0x001534, 0x2, 0x0);
nv_mask(pfb, 0x616308, 0x10, 0x10);
nv_mask(pfb, 0x616b08, 0x10, 0x10);
nv_mask(fb, 0x616308, 0x10, 0x10);
nv_mask(fb, 0x616b08, 0x10, 0x10);
} else {
ram_exec(fuc, false);
}
@ -879,9 +881,9 @@ gt215_ram_prog(struct nvkm_fb *pfb)
}
static void
gt215_ram_tidy(struct nvkm_fb *pfb)
gt215_ram_tidy(struct nvkm_fb *fb)
{
struct gt215_ram *ram = (void *)pfb->ram;
struct gt215_ram *ram = (void *)fb->ram;
struct gt215_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, false);
}
@ -889,7 +891,7 @@ gt215_ram_tidy(struct nvkm_fb *pfb)
static int
gt215_ram_init(struct nvkm_object *object)
{
struct nvkm_fb *pfb = (void *)object->parent;
struct nvkm_fb *fb = (void *)object->parent;
struct gt215_ram *ram = (void *)object;
int ret;
@ -897,17 +899,17 @@ gt215_ram_init(struct nvkm_object *object)
if (ret)
return ret;
gt215_link_train_init(pfb);
gt215_link_train_init(fb);
return 0;
}
static int
gt215_ram_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_fb *pfb = (void *)object->parent;
struct nvkm_fb *fb = (void *)object->parent;
if (!suspend)
gt215_link_train_fini(pfb);
gt215_link_train_fini(fb);
return 0;
}
@ -917,8 +919,8 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 datasize,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_gpio *gpio = nvkm_gpio(pfb);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_gpio *gpio = nvkm_gpio(fb);
struct dcb_gpio_func func;
struct gt215_ram *ram;
int ret, i;


@ -23,7 +23,7 @@
*/
#include "nv50.h"
struct mcp77_ram_priv {
struct mcp77_ram {
struct nvkm_ram base;
u64 poller_base;
};
@ -35,58 +35,58 @@ mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
{
u32 rsvd_head = ( 256 * 1024); /* vga memory */
u32 rsvd_tail = (1024 * 1024); /* vbios etc */
struct nvkm_fb *pfb = nvkm_fb(parent);
struct mcp77_ram_priv *priv;
struct nvkm_fb *fb = nvkm_fb(parent);
struct mcp77_ram *ram;
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
priv->base.type = NV_MEM_TYPE_STOLEN;
priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
ram->base.type = NV_MEM_TYPE_STOLEN;
ram->base.stolen = (u64)nv_rd32(fb, 0x100e10) << 12;
ram->base.size = (u64)nv_rd32(fb, 0x100e14) << 12;
rsvd_tail += 0x1000;
priv->poller_base = priv->base.size - rsvd_tail;
ram->poller_base = ram->base.size - rsvd_tail;
ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12,
(priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
ret = nvkm_mm_init(&fb->vram, rsvd_head >> 12,
(ram->base.size - (rsvd_head + rsvd_tail)) >> 12,
1);
if (ret)
return ret;
priv->base.get = nv50_ram_get;
priv->base.put = nv50_ram_put;
ram->base.get = nv50_ram_get;
ram->base.put = nv50_ram_put;
return 0;
}
static int
mcp77_ram_init(struct nvkm_object *object)
{
struct nvkm_fb *pfb = nvkm_fb(object);
struct mcp77_ram_priv *priv = (void *)object;
struct nvkm_fb *fb = nvkm_fb(object);
struct mcp77_ram *ram = (void *)object;
int ret;
u64 dniso, hostnb, flush;
ret = nvkm_ram_init(&priv->base);
ret = nvkm_ram_init(&ram->base);
if (ret)
return ret;
dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
dniso = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
/* Enable NISO poller for various clients and set their associated
* read address, only for MCP77/78 and MCP79/7A. (fd#25701)
*/
nv_wr32(pfb, 0x100c18, dniso);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
nv_wr32(pfb, 0x100c1c, hostnb);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
nv_wr32(pfb, 0x100c24, flush);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
nv_wr32(fb, 0x100c18, dniso);
nv_mask(fb, 0x100c14, 0x00000000, 0x00000001);
nv_wr32(fb, 0x100c1c, hostnb);
nv_mask(fb, 0x100c14, 0x00000000, 0x00000002);
nv_wr32(fb, 0x100c24, flush);
nv_mask(fb, 0x100c14, 0x00000000, 0x00010000);
return 0;
}


@ -29,9 +29,9 @@ nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
u32 boot0 = nv_rd32(fb, NV04_PFB_BOOT_0);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);


@ -28,9 +28,9 @@ nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
u32 cfg0 = nv_rd32(pfb, 0x100200);
u32 cfg0 = nv_rd32(fb, 0x100200);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);
@ -43,7 +43,7 @@ nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
else
ram->type = NV_MEM_TYPE_SDRAM;
ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
ram->size = nv_rd32(fb, 0x10020c) & 0xff000000;
return 0;
}


@ -28,7 +28,7 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
struct pci_dev *bridge;
u32 mem, mib;
@ -36,7 +36,7 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
if (!bridge) {
nv_fatal(pfb, "no bridge device\n");
nv_fatal(fb, "no bridge device\n");
return -ENODEV;
}
@ -45,7 +45,7 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
if (nv_device(pfb)->chipset == 0x1a) {
if (nv_device(fb)->chipset == 0x1a) {
pci_read_config_dword(bridge, 0x7c, &mem);
mib = ((mem >> 6) & 31) + 1;
} else {


@ -28,9 +28,9 @@ nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
u32 pbus1218 = nv_rd32(pfb, 0x001218);
u32 pbus1218 = nv_rd32(fb, 0x001218);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);
@ -44,9 +44,9 @@ nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
}
ram->size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
ram->tags = nv_rd32(pfb, 0x100320);
ram->size = (nv_rd32(fb, 0x10020c) & 0xff000000);
ram->parts = (nv_rd32(fb, 0x100200) & 0x00000003) + 1;
ram->tags = nv_rd32(fb, 0x100320);
return 0;
}


@ -31,21 +31,21 @@
#include <subdev/timer.h>
int
nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
nv40_ram_calc(struct nvkm_fb *fb, u32 freq)
{
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nv40_ram *ram = (void *)pfb->ram;
struct nvkm_bios *bios = nvkm_bios(fb);
struct nv40_ram *ram = (void *)fb->ram;
struct nvbios_pll pll;
int N1, M1, N2, M2;
int log2P, ret;
ret = nvbios_pll_parse(bios, 0x04, &pll);
if (ret) {
nv_error(pfb, "mclk pll data not found\n");
nv_error(fb, "mclk pll data not found\n");
return ret;
}
ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
ret = nv04_pll_calc(nv_subdev(fb), &pll, freq,
&N1, &M1, &N2, &M2, &log2P);
if (ret < 0)
return ret;
@ -64,10 +64,10 @@ nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
int
nv40_ram_prog(struct nvkm_fb *pfb)
nv40_ram_prog(struct nvkm_fb *fb)
{
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nv40_ram *ram = (void *)pfb->ram;
struct nvkm_bios *bios = nvkm_bios(fb);
struct nv40_ram *ram = (void *)fb->ram;
struct bit_entry M;
u32 crtc_mask = 0;
u8 sr1[2];
@ -75,12 +75,12 @@ nv40_ram_prog(struct nvkm_fb *pfb)
/* determine which CRTCs are active, fetch VGA_SR1 for each */
for (i = 0; i < 2; i++) {
u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
u32 vbl = nv_rd32(fb, 0x600808 + (i * 0x2000));
u32 cnt = 0;
do {
if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
if (vbl != nv_rd32(fb, 0x600808 + (i * 0x2000))) {
nv_wr08(fb, 0x0c03c4 + (i * 0x2000), 0x01);
sr1[i] = nv_rd08(fb, 0x0c03c5 + (i * 0x2000));
if (!(sr1[i] & 0x20))
crtc_mask |= (1 << i);
break;
@ -93,53 +93,53 @@ nv40_ram_prog(struct nvkm_fb *pfb)
for (i = 0; i < 2; i++) {
if (!(crtc_mask & (1 << i)))
continue;
nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
nv_wait(fb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
nv_wait(fb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
nv_wr08(fb, 0x0c03c4 + (i * 0x2000), 0x01);
nv_wr08(fb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
}
/* prepare ram for reclocking */
nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
nv_wr32(fb, 0x1002d4, 0x00000001); /* precharge */
nv_wr32(fb, 0x1002d0, 0x00000001); /* refresh */
nv_wr32(fb, 0x1002d0, 0x00000001); /* refresh */
nv_mask(fb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
nv_wr32(fb, 0x1002dc, 0x00000001); /* enable self-refresh */
/* change the PLL of each memory partition */
nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
switch (nv_device(pfb)->chipset) {
nv_mask(fb, 0x00c040, 0x0000c000, 0x00000000);
switch (nv_device(fb)->chipset) {
case 0x40:
case 0x45:
case 0x41:
case 0x42:
case 0x47:
nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
nv_wr32(pfb, 0x004048, ram->coef);
nv_wr32(pfb, 0x004030, ram->coef);
nv_mask(fb, 0x004044, 0xc0771100, ram->ctrl);
nv_mask(fb, 0x00402c, 0xc0771100, ram->ctrl);
nv_wr32(fb, 0x004048, ram->coef);
nv_wr32(fb, 0x004030, ram->coef);
case 0x43:
case 0x49:
case 0x4b:
nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
nv_wr32(pfb, 0x00403c, ram->coef);
nv_mask(fb, 0x004038, 0xc0771100, ram->ctrl);
nv_wr32(fb, 0x00403c, ram->coef);
default:
nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
nv_wr32(pfb, 0x004024, ram->coef);
nv_mask(fb, 0x004020, 0xc0771100, ram->ctrl);
nv_wr32(fb, 0x004024, ram->coef);
break;
}
udelay(100);
nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
nv_mask(fb, 0x00c040, 0x0000c000, 0x0000c000);
/* re-enable normal operation of memory controller */
nv_wr32(pfb, 0x1002dc, 0x00000000);
nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
nv_wr32(fb, 0x1002dc, 0x00000000);
nv_mask(fb, 0x100210, 0x80000000, 0x80000000);
udelay(100);
/* execute memory reset script from vbios */
if (!bit_entry(bios, 'M', &M)) {
struct nvbios_init init = {
.subdev = nv_subdev(pfb),
.subdev = nv_subdev(fb),
.bios = bios,
.offset = nv_ro16(bios, M.offset + 0x00),
.execute = 1,
@ -154,16 +154,16 @@ nv40_ram_prog(struct nvkm_fb *pfb)
for (i = 0; i < 2; i++) {
if (!(crtc_mask & (1 << i)))
continue;
nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
nv_wait(fb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
nv_wr08(fb, 0x0c03c4 + (i * 0x2000), 0x01);
nv_wr08(fb, 0x0c03c5 + (i * 0x2000), sr1[i]);
}
return 0;
}
void
nv40_ram_tidy(struct nvkm_fb *pfb)
nv40_ram_tidy(struct nvkm_fb *fb)
{
}
@ -172,9 +172,9 @@ nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pbus1218 = nv_rd32(pfb, 0x001218);
u32 pbus1218 = nv_rd32(fb, 0x001218);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);
@ -189,9 +189,9 @@ nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
}
ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
ram->base.tags = nv_rd32(pfb, 0x100320);
ram->base.size = nv_rd32(fb, 0x10020c) & 0xff000000;
ram->base.parts = (nv_rd32(fb, 0x100200) & 0x00000003) + 1;
ram->base.tags = nv_rd32(fb, 0x100320);
ram->base.calc = nv40_ram_calc;
ram->base.prog = nv40_ram_prog;
ram->base.tidy = nv40_ram_tidy;


@ -28,9 +28,9 @@ nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pfb474 = nv_rd32(pfb, 0x100474);
u32 fb474 = nv_rd32(fb, 0x100474);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);
@ -38,16 +38,16 @@ nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
if (pfb474 & 0x00000004)
if (fb474 & 0x00000004)
ram->base.type = NV_MEM_TYPE_GDDR3;
if (pfb474 & 0x00000002)
if (fb474 & 0x00000002)
ram->base.type = NV_MEM_TYPE_DDR2;
if (pfb474 & 0x00000001)
if (fb474 & 0x00000001)
ram->base.type = NV_MEM_TYPE_DDR1;
ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
ram->base.tags = nv_rd32(pfb, 0x100320);
ram->base.size = nv_rd32(fb, 0x10020c) & 0xff000000;
ram->base.parts = (nv_rd32(fb, 0x100200) & 0x00000003) + 1;
ram->base.tags = nv_rd32(fb, 0x100320);
ram->base.calc = nv40_ram_calc;
ram->base.prog = nv40_ram_prog;
ram->base.tidy = nv40_ram_tidy;


@ -28,9 +28,9 @@ nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pfb474 = nv_rd32(pfb, 0x100474);
u32 fb474 = nv_rd32(fb, 0x100474);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);
@ -38,14 +38,14 @@ nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
if (pfb474 & 0x00000004)
if (fb474 & 0x00000004)
ram->base.type = NV_MEM_TYPE_GDDR3;
if (pfb474 & 0x00000002)
if (fb474 & 0x00000002)
ram->base.type = NV_MEM_TYPE_DDR2;
if (pfb474 & 0x00000001)
if (fb474 & 0x00000001)
ram->base.type = NV_MEM_TYPE_DDR1;
ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
ram->base.size = nv_rd32(fb, 0x10020c) & 0xff000000;
ram->base.calc = nv40_ram_calc;
ram->base.prog = nv40_ram_prog;
ram->base.tidy = nv40_ram_tidy;


@ -28,9 +28,9 @@ nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nv40_ram *ram;
u32 pfb914 = nv_rd32(pfb, 0x100914);
u32 fb914 = nv_rd32(fb, 0x100914);
int ret;
ret = nvkm_ram_create(parent, engine, oclass, &ram);
@ -38,16 +38,16 @@ nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
switch (pfb914 & 0x00000003) {
switch (fb914 & 0x00000003) {
case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
case 0x00000003: break;
}
ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
ram->base.tags = nv_rd32(pfb, 0x100320);
ram->base.size = nv_rd32(fb, 0x10020c) & 0xff000000;
ram->base.parts = (nv_rd32(fb, 0x100200) & 0x00000003) + 1;
ram->base.tags = nv_rd32(fb, 0x100320);
ram->base.calc = nv40_ram_calc;
ram->base.prog = nv40_ram_prog;
ram->base.tidy = nv40_ram_tidy;


@ -28,7 +28,7 @@ nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
int ret;
@ -37,7 +37,7 @@ nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
ram->size = nv_rd32(fb, 0x10020c) & 0xff000000;
ram->type = NV_MEM_TYPE_STOLEN;
return 0;
}


@ -66,18 +66,17 @@ struct nv50_ram {
#define T(t) cfg->timing_10_##t
static int
nv50_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
nv50_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
{
struct nv50_ram *ram = (void *)pfb->ram;
struct nv50_ram *ram = (void *)fb->ram;
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
u32 cur2, cur3, cur4, cur7, cur8;
u32 cur2, cur4, cur7, cur8;
u8 unkt3b;
cur2 = nv_rd32(pfb, 0x100228);
cur3 = nv_rd32(pfb, 0x10022c);
cur4 = nv_rd32(pfb, 0x100230);
cur7 = nv_rd32(pfb, 0x10023c);
cur8 = nv_rd32(pfb, 0x100240);
cur2 = nv_rd32(fb, 0x100228);
cur4 = nv_rd32(fb, 0x100230);
cur7 = nv_rd32(fb, 0x10023c);
cur8 = nv_rd32(fb, 0x100240);
switch ((!T(CWL)) * ram->base.type) {
case NV_MEM_TYPE_DDR2:
@ -89,7 +88,7 @@ nv50_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
}
/* XXX: N=1 is not proper statistics */
if (nv_device(pfb)->chipset == 0xa0) {
if (nv_device(fb)->chipset == 0xa0) {
unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
timing[6] = (0x2d + T(CL) - T(CWL) +
ram->base.next->bios.rammap_00_16_40) << 16 |
@ -126,19 +125,19 @@ nv50_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
timing[8] = (cur8 & 0xffffff00);
/* XXX: P.version == 1 only has DDR2 and GDDR3? */
if (pfb->ram->type == NV_MEM_TYPE_DDR2) {
if (fb->ram->type == NV_MEM_TYPE_DDR2) {
timing[5] |= (T(CL) + 3) << 8;
timing[8] |= (T(CL) - 4);
} else if (pfb->ram->type == NV_MEM_TYPE_GDDR3) {
} else if (fb->ram->type == NV_MEM_TYPE_GDDR3) {
timing[5] |= (T(CL) + 2) << 8;
timing[8] |= (T(CL) - 2);
}
nv_debug(pfb, " 220: %08x %08x %08x %08x\n",
nv_debug(fb, " 220: %08x %08x %08x %08x\n",
timing[0], timing[1], timing[2], timing[3]);
nv_debug(pfb, " 230: %08x %08x %08x %08x\n",
nv_debug(fb, " 230: %08x %08x %08x %08x\n",
timing[4], timing[5], timing[6], timing[7]);
nv_debug(pfb, " 240: %08x\n", timing[8]);
nv_debug(fb, " 240: %08x\n", timing[8]);
return 0;
}
#undef T
@ -152,10 +151,10 @@ nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
}
static int
nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
{
struct nvkm_bios *bios = nvkm_bios(pfb);
struct nv50_ram *ram = (void *)pfb->ram;
struct nvkm_bios *bios = nvkm_bios(fb);
struct nv50_ram *ram = (void *)fb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
struct nvbios_perfE perfE;
struct nvbios_pll mpll;
@ -178,7 +177,7 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
&size, &perfE);
if (!data || (ver < 0x25 || ver >= 0x40) ||
(size < 2)) {
nv_error(pfb, "invalid/missing perftab entry\n");
nv_error(fb, "invalid/missing perftab entry\n");
return -EINVAL;
}
} while (perfE.memory < freq);
@ -186,16 +185,16 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
/* locate specific data set for the attached memory */
strap = nvbios_ramcfg_index(nv_subdev(pfb));
strap = nvbios_ramcfg_index(nv_subdev(fb));
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
nv_error(fb, "invalid ramcfg strap\n");
return -EINVAL;
}
data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
&next->bios);
if (!data) {
nv_error(pfb, "invalid/missing rammap entry ");
nv_error(fb, "invalid/missing rammap entry ");
return -EINVAL;
}
@ -204,16 +203,16 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
&ver, &hdr, &cnt, &len, &next->bios);
if (!data || ver != 0x10 || hdr < 0x12) {
nv_error(pfb, "invalid/missing timing entry "
nv_error(fb, "invalid/missing timing entry "
"%02x %04x %02x %02x\n",
strap, data, ver, hdr);
return -EINVAL;
}
}
nv50_ram_timing_calc(pfb, timing);
nv50_ram_timing_calc(fb, timing);
ret = ram_init(hwsq, nv_subdev(pfb));
ret = ram_init(hwsq, nv_subdev(fb));
if (ret)
return ret;
@ -254,10 +253,10 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
ret = nvbios_pll_parse(bios, 0x004008, &mpll);
mpll.vco2.max_freq = 0;
if (ret == 0) {
ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
if (ret >= 0) {
ret = nv04_pll_calc(nv_subdev(fb), &mpll, freq,
&N1, &M1, &N2, &M2, &P);
if (ret == 0)
if (ret <= 0)
ret = -EINVAL;
}
@ -282,7 +281,7 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
next->bios.rammap_00_16_40 << 14);
ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
if (nv_device(pfb)->chipset >= 0x96)
if (nv_device(fb)->chipset >= 0x96)
ram_wr32(hwsq, 0x100da0, r100da0);
ram_nsec(hwsq, 64000); /*XXX*/
ram_nsec(hwsq, 32000); /*XXX*/
@ -380,10 +379,10 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
static int
nv50_ram_prog(struct nvkm_fb *pfb)
nv50_ram_prog(struct nvkm_fb *fb)
{
struct nvkm_device *device = nv_device(pfb);
struct nv50_ram *ram = (void *)pfb->ram;
struct nvkm_device *device = nv_device(fb);
struct nv50_ram *ram = (void *)fb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
@ -391,15 +390,15 @@ nv50_ram_prog(struct nvkm_fb *pfb)
}
static void
nv50_ram_tidy(struct nvkm_fb *pfb)
nv50_ram_tidy(struct nvkm_fb *fb)
{
struct nv50_ram *ram = (void *)pfb->ram;
struct nv50_ram *ram = (void *)fb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
ram_exec(hwsq, false);
}
void
__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
__nv50_ram_put(struct nvkm_fb *fb, struct nvkm_mem *mem)
{
struct nvkm_mm_node *this;
@ -407,14 +406,14 @@ __nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
list_del(&this->rl_entry);
nvkm_mm_free(&pfb->vram, &this);
nvkm_mm_free(&fb->vram, &this);
}
nvkm_mm_free(&pfb->tags, &mem->tag);
nvkm_mm_free(&fb->tags, &mem->tag);
}
void
nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
nv50_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
{
struct nvkm_mem *mem = *pmem;
@ -422,19 +421,19 @@ nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
if (unlikely(mem == NULL))
return;
mutex_lock(&pfb->base.mutex);
__nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
mutex_lock(&fb->subdev.mutex);
__nv50_ram_put(fb, mem);
mutex_unlock(&fb->subdev.mutex);
kfree(mem);
}
int
nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
nv50_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nvkm_mem **pmem)
{
struct nvkm_mm *heap = &pfb->vram;
struct nvkm_mm *tags = &pfb->tags;
struct nvkm_mm *heap = &fb->vram;
struct nvkm_mm *tags = &fb->tags;
struct nvkm_mm_node *r;
struct nvkm_mem *mem;
int comp = (memtype & 0x300) >> 8;
@ -450,7 +449,7 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
if (!mem)
return -ENOMEM;
mutex_lock(&pfb->base.mutex);
mutex_lock(&fb->subdev.mutex);
if (comp) {
if (align == 16) {
int n = (max >> 4) * comp;
@ -475,15 +474,15 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
else
ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
if (ret) {
mutex_unlock(&pfb->base.mutex);
pfb->ram->put(pfb, &mem);
mutex_unlock(&fb->subdev.mutex);
fb->ram->put(fb, &mem);
return ret;
}
list_add_tail(&r->rl_entry, &mem->regions);
max -= r->length;
} while (max);
mutex_unlock(&pfb->base.mutex);
mutex_unlock(&fb->subdev.mutex);
r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
@ -492,17 +491,17 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
}
static u32
nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
nv50_fb_vram_rblock(struct nvkm_fb *fb, struct nvkm_ram *ram)
{
int colbits, rowbitsa, rowbitsb, banks;
u64 rowsize, predicted;
u32 r0, r4, rt, rblock_size;
r0 = nv_rd32(pfb, 0x100200);
r4 = nv_rd32(pfb, 0x100204);
rt = nv_rd32(pfb, 0x100250);
nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
r0, r4, rt, nv_rd32(pfb, 0x001540));
r0 = nv_rd32(fb, 0x100200);
r4 = nv_rd32(fb, 0x100204);
rt = nv_rd32(fb, 0x100250);
nv_debug(fb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
r0, r4, rt, nv_rd32(fb, 0x001540));
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@ -515,7 +514,7 @@ nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
predicted += rowsize << rowbitsb;
if (predicted != ram->size) {
nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
nv_warn(fb, "memory controller reports %d MiB VRAM\n",
(u32)(ram->size >> 20));
}
@ -523,7 +522,7 @@ nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
if (rt & 1)
rblock_size *= 3;
nv_debug(pfb, "rblock %d bytes\n", rblock_size);
nv_debug(fb, "rblock %d bytes\n", rblock_size);
return rblock_size;
}
@ -534,7 +533,7 @@ nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
struct nvkm_bios *bios = nvkm_bios(parent);
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
int ret;
@ -543,13 +542,13 @@ nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ram->size = nv_rd32(pfb, 0x10020c);
ram->size = nv_rd32(fb, 0x10020c);
ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
ram->part_mask = (nv_rd32(fb, 0x001540) & 0x00ff0000) >> 16;
ram->parts = hweight8(ram->part_mask);
switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
switch (nv_rd32(fb, 0x100714) & 0x00000007) {
case 0: ram->type = NV_MEM_TYPE_DDR1; break;
case 1:
if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
@ -564,14 +563,14 @@ nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
break;
}
ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
ret = nvkm_mm_init(&fb->vram, rsvd_head, (ram->size >> 12) -
(rsvd_head + rsvd_tail),
nv50_fb_vram_rblock(pfb, ram) >> 12);
nv50_fb_vram_rblock(fb, ram) >> 12);
if (ret)
return ret;
ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
ram->tags = nv_rd32(pfb, 0x100320);
ram->ranks = (nv_rd32(fb, 0x100200) & 0x4) ? 2 : 1;
ram->tags = nv_rd32(fb, 0x100320);
ram->get = nv50_ram_get;
ram->put = nv50_ram_put;
return 0;


@ -82,8 +82,8 @@ static void
nv50_instobj_dtor(struct nvkm_object *object)
{
struct nv50_instobj_priv *node = (void *)object;
struct nvkm_fb *pfb = nvkm_fb(object);
pfb->ram->put(pfb, &node->mem);
struct nvkm_fb *fb = nvkm_fb(object);
fb->ram->put(fb, &node->mem);
nvkm_instobj_destroy(&node->base);
}
@ -92,7 +92,7 @@ nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_instobj_args *args = data;
struct nv50_instobj_priv *node;
int ret;
@ -105,7 +105,7 @@ nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
ret = fb->ram->get(fb, args->size, args->align, 0, 0x800, &node->mem);
if (ret)
return ret;


@ -132,12 +132,12 @@ gf100_ltc_init(struct nvkm_object *object)
void
gf100_ltc_dtor(struct nvkm_object *object)
{
struct nvkm_fb *pfb = nvkm_fb(object);
struct nvkm_fb *fb = nvkm_fb(object);
struct nvkm_ltc_priv *priv = (void *)object;
nvkm_mm_fini(&priv->tags);
if (pfb->ram)
nvkm_mm_free(&pfb->vram, &priv->tag_ram);
if (fb->ram)
nvkm_mm_free(&fb->vram, &priv->tag_ram);
nvkm_ltc_destroy(priv);
}
@ -145,19 +145,19 @@ gf100_ltc_dtor(struct nvkm_object *object)
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
int
gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
gf100_ltc_init_tag_ram(struct nvkm_fb *fb, struct nvkm_ltc_priv *priv)
{
u32 tag_size, tag_margin, tag_align;
int ret;
/* No VRAM, no tags for now. */
if (!pfb->ram) {
if (!fb->ram) {
priv->num_tags = 0;
goto mm_init;
}
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram->size >> 17) / 4;
priv->num_tags = (fb->ram->size >> 17) / 4;
if (priv->num_tags > (1 << 17))
priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
@ -177,7 +177,7 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
tag_size += tag_align;
tag_size = (tag_size + 0xfff) >> 12; /* round up */
ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
ret = nvkm_mm_tail(&fb->vram, 1, 1, tag_size, tag_size, 1,
&priv->tag_ram);
if (ret) {
priv->num_tags = 0;
@ -200,7 +200,7 @@ gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
@ -218,7 +218,7 @@ gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
}
priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
ret = gf100_ltc_init_tag_ram(pfb, priv);
ret = gf100_ltc_init_tag_ram(fb, priv);
if (ret)
return ret;


@ -110,7 +110,7 @@ gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_fb *pfb = nvkm_fb(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
@ -128,7 +128,7 @@ gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
}
priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
ret = gf100_ltc_init_tag_ram(pfb, priv);
ret = gf100_ltc_init_tag_ram(fb, priv);
if (ret)
return ret;