gma500: Ensure the frame buffer has a linear virtual mapping

We need a linear virtual mapping of the framebuffer so that the kernel
framebuffer layer can handle it when using KMS. Apart from the base
framebuffer this isn't a concern.
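
For context: the fbdev layer's cfb_* drawing helpers assume info->screen_base
is one contiguous kernel virtual range, so a GTT-backed object made of
scattered pages must be given a linear mapping first. A minimal sketch, not
part of the patch (the helper names here are illustrative), of the
vm_map_ram()/vm_unmap_ram() pairing the patch relies on; kernels of this era
take the four-argument form:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Map scattered backing pages into one contiguous kernel VA range */
    static void *map_fb_linear(struct page **pages, unsigned int npage)
    {
        /* -1 = any NUMA node, PAGE_KERNEL = ordinary cached mapping */
        return vm_map_ram(pages, npage, -1, PAGE_KERNEL);
    }

    /* Teardown must pass the same page count that was used to map */
    static void unmap_fb_linear(void *vaddr, unsigned int npage)
    {
        vm_unmap_ram(vaddr, npage);
    }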

Add an npage field to the gtt range, as too many copies of the page-count
calculation are getting spread around the code.
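
The calculation being consolidated is visible in the hunks below: caching it
once when the backing pages are attached avoids re-deriving it at every
insert, remove and detach (both lines taken from the diff itself):

    /* Before: every call site re-derived the page count */
    int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;

    /* After: computed once in psb_gtt_attach_pages() ... */
    gt->npage = gt->gem.size / PAGE_SIZE;
    /* ... and reused everywhere as r->npage / gt->npage */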

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit 9460e84a91 (parent 0c9e98af5e)
Author: Alan Cox <alan@linux.intel.com>, 2011-07-05 15:34:03 +01:00
Committed by: Greg Kroah-Hartman
4 changed files with 42 additions and 29 deletions

drivers/staging/gma500/psb_fb.c

@@ -254,17 +254,13 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 		 vma->vm_pgoff, fb_screen_base,
 		 dev_priv->vram_addr);
-	/* FIXME: ultimately this needs to become 'if entirely stolen memory' */
-	if (1 || fb_screen_base == dev_priv->vram_addr) {
-		/* If this is a GEM object then info->screen_base is the virtual
-		   kernel remapping of the object. FIXME: Review if this is
-		   suitable for our mmap work */
-		vma->vm_ops = &psbfb_vm_ops;
-		vma->vm_private_data = (void *)psbfb;
-		vma->vm_flags |= VM_RESERVED | VM_IO |
-				 VM_MIXEDMAP | VM_DONTEXPAND;
-	} else {
-		/* GTT memory backed by kernel/user pages, needs a different
-		   approach ? - GEM ? */
-	}
 	return 0;
 }
@@ -349,8 +345,6 @@ err:
  *
  * FIXME: console speed up - allocate twice the space if room and use
  *	hardware scrolling for acceleration.
- * FIXME: we need to vm_map_ram a linear mapping if the object has to
- *	be GEM host mapped, otherwise the cfb layer's brain will fall out.
  */
 static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
 {
@@ -439,10 +433,22 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 	info->fix.smem_start = dev->mode_config.fb_base;
 	info->fix.smem_len = size;
-	/* Accessed via stolen memory directly, This only works for stolem
-	   memory however. Need to address this once we start using gtt
-	   pages we allocate. FIXME: vm_map_ram for that case */
-	info->screen_base = (char *)dev_priv->vram_addr + backing->offset;
+	if (backing->stolen) {
+		/* Accessed stolen memory directly */
+		info->screen_base = (char *)dev_priv->vram_addr +
+						backing->offset;
+	} else {
+		/* Pin the pages into the GTT and create a mapping to them */
+		psb_gtt_pin(backing);
+		info->screen_base = vm_map_ram(backing->pages, backing->npage,
+						-1, PAGE_KERNEL);
+		if (info->screen_base == NULL) {
+			psb_gtt_unpin(backing);
+			ret = -ENOMEM;
+			goto out_err0;
+		}
+		psbfb->vm_map = 1;
+	}
 	info->screen_size = size;
 	memset(info->screen_base, 0, size);
@@ -570,6 +576,13 @@ int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
 	if (fbdev->psb_fb_helper.fbdev) {
 		info = fbdev->psb_fb_helper.fbdev;
+		/* If this is our base framebuffer then kill any virtual map
+		   for the framebuffer layer and unpin it */
+		if (psbfb->vm_map) {
+			vm_unmap_ram(info->screen_base, psbfb->gtt->npage);
+			psb_gtt_unpin(psbfb->gtt);
+		}
 		/* FIXME: this is a bit more inside knowledge than I'd like
 		   but I don't see how to make a fake GEM object of the
 		   stolen space nicely */
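
Taken together, the create and destroy hunks give the full lifecycle for a
non-stolen framebuffer: pin into the GTT, map linearly, then undo both at
teardown. A condensed sketch assembled from the two paths above (the helper
function itself is hypothetical; everything inside it comes from the diff):

    /* Hypothetical helper condensing the setup path from psbfb_create() */
    static int psbfb_map_backing(struct drm_psb_private *dev_priv,
                                 struct psb_framebuffer *psbfb,
                                 struct gtt_range *backing,
                                 struct fb_info *info)
    {
        if (backing->stolen) {
            /* Stolen memory is already linear in the vram mapping */
            info->screen_base = (char *)dev_priv->vram_addr +
                                            backing->offset;
            return 0;
        }
        psb_gtt_pin(backing);           /* keep the pages GTT-resident */
        info->screen_base = vm_map_ram(backing->pages, backing->npage,
                                       -1, PAGE_KERNEL);
        if (info->screen_base == NULL) {
            psb_gtt_unpin(backing);
            return -ENOMEM;
        }
        psbfb->vm_map = 1;      /* destroy path must vm_unmap_ram() */
        return 0;
    }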

drivers/staging/gma500/psb_fb.h

@@ -33,6 +33,7 @@ struct psb_framebuffer {
 	struct address_space *addr_space;
 	struct fb_info *fbdev;
 	struct gtt_range *gtt;
+	bool vm_map;		/* True if we must undo a vm_map_ram */
 };

 struct psb_fbdev {

drivers/staging/gma500/psb_gtt.c

@@ -79,7 +79,6 @@ u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
 static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
 {
 	u32 *gtt_slot, pte;
-	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
 	struct page **pages;
 	int i;
@@ -94,10 +93,10 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
 	pages = r->pages;

 	/* Make sure changes are visible to the GPU */
-	set_pages_array_uc(pages, numpages);
+	set_pages_array_uc(pages, r->npage);

 	/* Write our page entries into the GTT itself */
-	for (i = 0; i < numpages; i++) {
+	for (i = 0; i < r->npage; i++) {
 		pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
 		iowrite32(pte, gtt_slot++);
 	}
@@ -120,7 +119,6 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	u32 *gtt_slot, pte;
-	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
 	int i;

 	WARN_ON(r->stolen);
@@ -128,10 +126,10 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
 	gtt_slot = psb_gtt_entry(dev, r);
 	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

-	for (i = 0; i < numpages; i++)
+	for (i = 0; i < r->npage; i++)
 		iowrite32(pte, gtt_slot++);
 	ioread32(gtt_slot - 1);

-	set_pages_array_wb(r->pages, numpages);
+	set_pages_array_wb(r->pages, r->npage);
 }

 /**
@@ -149,7 +147,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
 	struct address_space *mapping;
 	int i;
 	struct page *p;
-	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;
+	int pages = gt->gem.size / PAGE_SIZE;

 	WARN_ON(gt->pages);
@@ -160,6 +158,8 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
 	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
 	if (gt->pages == NULL)
 		return -ENOMEM;
+	gt->npage = pages;
+
 	for (i = 0; i < pages; i++) {
 		/* FIXME: review flags later */
 		p = read_cache_page_gfp(mapping, i,
@@ -191,9 +191,7 @@ err:
 static void psb_gtt_detach_pages(struct gtt_range *gt)
 {
 	int i;
-	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;
-
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < gt->npage; i++) {
 		/* FIXME: do we need to force dirty */
 		set_page_dirty(gt->pages[i]);
 		/* Undo the reference we took when populating the table */

drivers/staging/gma500/psb_gtt.h

@@ -49,6 +49,7 @@ struct gtt_range {
 	bool stolen;		/* Backed from stolen RAM */
 	bool mmapping;		/* Is mmappable */
 	struct page **pages;	/* Backing pages if present */
+	int npage;		/* Number of backing pages */
 };

 extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,