drm/i915/gen8: Update pdp switch and point unused PDPs to scratch page
One important part of this patch is we now write a scratch page directory into any unused PDP descriptors. This matters for 2 reasons, first, we're not allowed to just use 0, or an invalid pointer, and second, we must wipe out any previous contents from the last context. The latter point only matters with full PPGTT. The former point only affects platforms with less than 4GB memory. v2: Updated commit message to point that we must set unused PDPs to the scratch page. v3: Unmap scratch_pd in gen8_ppgtt_free. v4: Initialize scratch_pd. (Mika) Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com> Signed-off-by: Ben Widawsky <ben@bwidawsk.net> Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+) Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent
5441f0cbe1
commit
7cb6d7ac63
|
@@ -457,8 +457,9 @@ static struct i915_page_directory *alloc_pd_single(void)
|
|||
}
|
||||
|
||||
/* Broadwell Page Directory Pointer Descriptors */
|
||||
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
|
||||
uint64_t val)
|
||||
static int gen8_write_pdp(struct intel_engine_cs *ring,
|
||||
unsigned entry,
|
||||
dma_addr_t addr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@@ -470,10 +471,10 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
|
|||
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
|
||||
intel_ring_emit(ring, (u32)(val >> 32));
|
||||
intel_ring_emit(ring, upper_32_bits(addr));
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
|
||||
intel_ring_emit(ring, (u32)(val));
|
||||
intel_ring_emit(ring, lower_32_bits(addr));
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
|
@@ -484,12 +485,12 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
|||
{
|
||||
int i, ret;
|
||||
|
||||
/* bit of a hack to find the actual last used pd */
|
||||
int used_pd = ppgtt->num_pd_entries / I915_PDES;
|
||||
|
||||
for (i = used_pd - 1; i >= 0; i--) {
|
||||
dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
|
||||
ret = gen8_write_pdp(ring, i, addr);
|
||||
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
|
||||
struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
|
||||
dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
|
||||
/* The page directory might be NULL, but we need to clear out
|
||||
* whatever the previous context might have used. */
|
||||
ret = gen8_write_pdp(ring, i, pd_daddr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@@ -664,6 +665,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
|
|||
unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
|
||||
}
|
||||
|
||||
unmap_and_free_pd(ppgtt->scratch_pd);
|
||||
unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
|
||||
}
|
||||
|
||||
|
@@ -880,12 +882,20 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
|||
if (IS_ERR(ppgtt->scratch_pt))
|
||||
return PTR_ERR(ppgtt->scratch_pt);
|
||||
|
||||
ppgtt->scratch_pd = alloc_pd_single();
|
||||
if (IS_ERR(ppgtt->scratch_pd))
|
||||
return PTR_ERR(ppgtt->scratch_pd);
|
||||
|
||||
gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
|
||||
gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
|
||||
|
||||
/* 1. Do all our allocations for page directories and page tables. */
|
||||
ret = gen8_ppgtt_alloc(ppgtt, ppgtt->base.start, ppgtt->base.total);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
unmap_and_free_pd(ppgtt->scratch_pd);
|
||||
unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* 2. Create DMA mappings for the page directories and page tables.
|
||||
|
|
|
@@ -322,6 +322,7 @@ struct i915_hw_ppgtt {
|
|||
};
|
||||
|
||||
struct i915_page_table *scratch_pt;
|
||||
struct i915_page_directory *scratch_pd;
|
||||
|
||||
struct drm_i915_file_private *file_priv;
|
||||
|
||||
|
|
Loading…
Reference in New Issue