drm/nouveau/gr/gf100-: make global bundle_cb actually global
This was thought to be per-channel initially - it's not.  The backing
pages for the VMM mappings are shared for all channels.

- switches to more straightforward patch interfaces
- prepares for sub-context support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
commit 95f78acd88
parent d05095b53c
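At a high level, the diff below replaces per-channel bundle CB allocations with one device-level allocation that every channel maps into its own VMM and then patches into its context image. A condensed sketch of that lifecycle, stitched together from the hunks below (error handling and surrounding code trimmed; an illustration of the shape of the change, not a complete patch):

	/* Once, at device init (gf100_gr_oneinit): allocate the shared backing pages. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      gr->func->grctx->bundle_size, 0x100, false, &gr->bundle_cb);

	/* Per channel (gf100_gr_chan_new): carve out a VA window and map the same pages. */
	ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(gr->bundle_cb), &chan->bundle_cb);
	ret = nvkm_memory_map(gr->bundle_cb, 0, chan->vmm, chan->bundle_cb, &args, sizeof(args));

	/* Per channel: patch the context image with the channel-local VA. */
	gr->func->grctx->bundle(chan, chan->bundle_cb->addr, gr->func->grctx->bundle_size);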
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1050,15 +1050,12 @@ gf100_grctx_generate_r419cb8(struct gf100_gr *gr)
 }
 
 void
-gf100_grctx_generate_bundle(struct gf100_grctx *info)
+gf100_grctx_generate_bundle(struct gf100_gr_chan *chan, u64 addr, u32 size)
 {
-	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const int s = 8;
-	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
-	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
-	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
+	gf100_grctx_patch_wr32(chan, 0x408004, addr >> 8);
+	gf100_grctx_patch_wr32(chan, 0x408008, 0x80000000 | (size >> 8));
+	gf100_grctx_patch_wr32(chan, 0x418808, addr >> 8);
+	gf100_grctx_patch_wr32(chan, 0x41880c, 0x80000000 | (size >> 8));
 }
 
 void
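Both register pairs take their values in 256-byte units, hence the `>> 8` shifts; the shifts assume a 256-byte-aligned VA, which the 4KiB page granularity of the per-channel mapping guarantees. Worked through with hypothetical numbers (the real bundle_size is per-chipset, and the VA is whatever nvkm_vmm_get() returns):

	const u64 addr = 0x2000000;	/* hypothetical channel VA, 256-byte aligned */
	const u32 size = 0x3000;	/* hypothetical grctx->bundle_size */

	const u32 base  = addr >> 8;			/* 0x00020000 */
	const u32 limit = 0x80000000 | (size >> 8);	/* 0x80000030; top bit presumably a valid/enable flag */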
@@ -1396,7 +1393,7 @@ gf100_grctx_generate_main(struct gf100_gr_chan *chan, struct gf100_grctx *info)
 	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->pagepool(chan, chan->pagepool->addr);
-	grctx->bundle(info);
+	grctx->bundle(chan, chan->bundle_cb->addr, grctx->bundle_size);
 	grctx->attrib(info);
 	if (grctx->patch_ltc)
 		grctx->patch_ltc(info);
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -39,7 +39,7 @@ struct gf100_grctx_func {
 	const struct gf100_gr_pack *mthd;
 	const struct gf100_gr_pack *sw_veid_bundle_init;
 	/* bundle circular buffer */
-	void (*bundle)(struct gf100_grctx *);
+	void (*bundle)(struct gf100_gr_chan *, u64 addr, u32 size);
 	u32 bundle_size;
 	u32 bundle_min_gpm_fifo_depth;
 	u32 bundle_token_limit;
@@ -84,8 +84,8 @@ struct gf100_grctx_func {
 extern const struct gf100_grctx_func gf100_grctx;
 int gf100_grctx_generate(struct gf100_gr *, struct gf100_gr_chan *, struct nvkm_gpuobj *inst);
 void gf100_grctx_generate_main(struct gf100_gr_chan *, struct gf100_grctx *);
-void gf100_grctx_generate_bundle(struct gf100_grctx *);
 void gf100_grctx_generate_pagepool(struct gf100_gr_chan *, u64);
+void gf100_grctx_generate_bundle(struct gf100_gr_chan *, u64, u32);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
 void gf100_grctx_generate_unkn(struct gf100_gr *);
 void gf100_grctx_generate_floorsweep(struct gf100_gr *);
@@ -116,8 +116,8 @@ void gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *);
 void gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk20a_grctx;
-void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_gr_chan *, u64);
+void gk104_grctx_generate_bundle(struct gf100_gr_chan *, u64, u32);
 void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
 void gk104_grctx_generate_unkn(struct gf100_gr *);
 void gk104_grctx_generate_r418800(struct gf100_gr *);
@@ -129,8 +129,8 @@ extern const struct gf100_grctx_func gk110b_grctx;
 extern const struct gf100_grctx_func gk208_grctx;
 
 extern const struct gf100_grctx_func gm107_grctx;
-void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_gr_chan *, u64);
+void gm107_grctx_generate_bundle(struct gf100_gr_chan *, u64, u32);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 void gm107_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
 
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -872,19 +872,14 @@ gk104_grctx_generate_patch_ltc(struct gf100_grctx *info)
 }
 
 void
-gk104_grctx_generate_bundle(struct gf100_grctx *info)
+gk104_grctx_generate_bundle(struct gf100_gr_chan *chan, u64 addr, u32 size)
 {
-	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
-				    grctx->bundle_size / 0x20);
+	const struct gf100_grctx_func *grctx = chan->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth, size / 0x20);
 	const u32 token_limit = grctx->bundle_token_limit;
-	const int s = 8;
-	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
-	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
-	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
-	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
+
+	gf100_grctx_generate_bundle(chan, addr, size);
+	gf100_grctx_patch_wr32(chan, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
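gk104 now reuses the gf100 helper for the common registers and layers a single extra patch write on top: 0x4064c8 packs the state limit into the high 16 bits and the token limit into the low 16. A self-contained sketch of that computation with hypothetical inputs (the real values come from the per-chipset grctx funcs):

	#include <linux/minmax.h>
	#include <linux/types.h>

	/* Hypothetical inputs; real ones are grctx->bundle_min_gpm_fifo_depth etc. */
	static u32
	bundle_limits(u32 size, u32 min_gpm_fifo_depth, u32 token_limit)
	{
		const u32 state_limit = min(min_gpm_fifo_depth, size / 0x20);

		return (state_limit << 16) | token_limit;	/* payload for 0x4064c8 */
	}

For example, bundle_limits(0x3000, 0x180, 0x600) == 0x01800600: the state limit 0x180 (= min(0x180, 0x3000 / 0x20)) lands in the upper half-word, the token limit 0x600 in the lower.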
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -62,7 +62,7 @@ gk20a_grctx_generate_main(struct gf100_gr_chan *chan, struct gf100_grctx *info)
 
 	gf100_gr_icmd(gr, gr->bundle);
 	grctx->pagepool(chan, chan->pagepool->addr);
-	grctx->bundle(info);
+	grctx->bundle(chan, chan->bundle_cb->addr, grctx->bundle_size);
 }
 
 const struct gf100_grctx_func
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -876,19 +876,17 @@ gm107_grctx_generate_r419e00(struct gf100_gr *gr)
 }
 
 void
-gm107_grctx_generate_bundle(struct gf100_grctx *info)
+gm107_grctx_generate_bundle(struct gf100_gr_chan *chan, u64 addr, u32 size)
 {
-	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
-				    grctx->bundle_size / 0x20);
+	const struct gf100_grctx_func *grctx = chan->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth, size / 0x20);
 	const u32 token_limit = grctx->bundle_token_limit;
-	const int s = 8;
-	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
-	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
-	mmio_refn(info, 0x418e24, 0x00000000, s, b);
-	mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
-	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
+
+	gf100_grctx_patch_wr32(chan, 0x408004, addr >> 8);
+	gf100_grctx_patch_wr32(chan, 0x408008, 0x80000000 | (size >> 8));
+	gf100_grctx_patch_wr32(chan, 0x418e24, addr >> 8);
+	gf100_grctx_patch_wr32(chan, 0x418e28, 0x80000000 | (size >> 8));
+	gf100_grctx_patch_wr32(chan, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -65,7 +65,7 @@ gm20b_grctx_generate_main(struct gf100_gr_chan *chan, struct gf100_grctx *info)
 
 	gf100_gr_icmd(gr, gr->bundle);
 	grctx->pagepool(chan, chan->pagepool->addr);
-	grctx->bundle(info);
+	grctx->bundle(chan, chan->bundle_cb->addr, grctx->bundle_size);
 }
 
 const struct gf100_grctx_func
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -365,6 +365,7 @@ gf100_gr_chan_dtor(struct nvkm_object *object)
 	nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
 	nvkm_memory_unref(&chan->mmio);
 
+	nvkm_vmm_put(chan->vmm, &chan->bundle_cb);
 	nvkm_vmm_put(chan->vmm, &chan->pagepool);
 	nvkm_vmm_unref(&chan->vmm);
 	return chan;
@@ -405,6 +406,15 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 	if (ret)
 		return ret;
 
+	/* Map bundle circular buffer. */
+	ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(gr->bundle_cb), &chan->bundle_cb);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_map(gr->bundle_cb, 0, chan->vmm, chan->bundle_cb, &args, sizeof(args));
+	if (ret)
+		return ret;
+
 	/* Generate golden context image. */
 	mutex_lock(&gr->fecs.mutex);
 	if (gr->data == NULL) {
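The second argument to nvkm_vmm_get() here is a page shift, so the per-channel window is carved out in 4KiB (1 << 12) pages, matching the pagepool mapping above it; nvkm_memory_map() then points every channel's window at the same backing pages, which is what makes the bundle CB genuinely global.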
@@ -461,6 +471,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 	/* finally, fill in the mmio list and point the context at it */
 	nvkm_kmap(chan->mmio);
 	gr->func->grctx->pagepool(chan, chan->pagepool->addr);
+	gr->func->grctx->bundle(chan, chan->bundle_cb->addr, gr->func->grctx->bundle_size);
 	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
 		u32 addr = mmio->addr;
 		u32 data = mmio->data;
@@ -1982,6 +1993,11 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 	if (ret)
 		return ret;
 
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->bundle_size,
+			      0x100, false, &gr->bundle_cb);
+	if (ret)
+		return ret;
+
 	memset(gr->tile, 0xff, sizeof(gr->tile));
 	gr->func->oneinit_tiles(gr);
 	gr->func->oneinit_sm_id(gr);
@@ -2051,6 +2067,7 @@ gf100_gr_dtor(struct nvkm_gr *base)
 
 	kfree(gr->data);
 
+	nvkm_memory_unref(&gr->bundle_cb);
 	nvkm_memory_unref(&gr->pagepool);
 
 	nvkm_falcon_dtor(&gr->gpccs.falcon);
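Teardown mirrors setup: gf100_gr_chan_dtor() (earlier hunk) releases only the per-channel VA window with nvkm_vmm_put(), while the shared backing memory is dropped exactly once here, via nvkm_memory_unref(), when the gr device itself is destroyed.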
|
@ -122,6 +122,7 @@ struct gf100_gr {
|
|||
u8 ppc_tpc_max;
|
||||
|
||||
struct nvkm_memory *pagepool;
|
||||
struct nvkm_memory *bundle_cb;
|
||||
|
||||
u8 screen_tile_row_offset;
|
||||
u8 tile[TPC_MAX];
|
||||
|
@@ -261,6 +262,7 @@ struct gf100_gr_chan {
 	struct nvkm_vmm *vmm;
 
 	struct nvkm_vma *pagepool;
+	struct nvkm_vma *bundle_cb;
 
 	struct nvkm_memory *mmio;
 	struct nvkm_vma *mmio_vma;