intel-gtt: export api for drm/i915

Just some minor shuffling to get rid of any agp traces in the
exported functions.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Daniel Vetter 2010-11-06 11:18:58 +01:00 committed by Chris Wilson
parent 7c2e6fdf45
commit 4080775b60
2 changed files with 81 additions and 53 deletions
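
For orientation, a minimal sketch of the bind path a client such as i915 could build on the newly exported functions; the helper name my_bind_pages and the caller-tracked sg state are hypothetical, not part of this patch:

#include <linux/scatterlist.h>
#include <drm/intel-gtt.h>

/* Hypothetical i915-style bind: DMA-map through the IOMMU when the
 * chipset needs it, otherwise write physical addresses directly. */
static int my_bind_pages(struct page **pages, unsigned int num_pages,
			 unsigned int first_entry, unsigned int flags,
			 struct scatterlist **sg_list, int *num_sg)
{
	int ret;

	if (intel_gtt_get()->needs_dmar) {
		ret = intel_gtt_map_memory(pages, num_pages, sg_list, num_sg);
		if (ret)
			return ret;
		intel_gtt_insert_sg_entries(*sg_list, *num_sg,
					    first_entry, flags);
	} else {
		intel_gtt_insert_pages(first_entry, num_pages, pages, flags);
	}
	return 0;
}

This mirrors what intel_fake_agp_insert_entries() is reduced to in the diff below.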

drivers/char/agp/intel-gtt.c

@@ -87,41 +87,29 @@ static struct _intel_private {
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
-	struct sg_table st;
-
-	st.sgl = mem->sg_list;
-	st.orig_nents = st.nents = mem->page_count;
-
-	sg_free_table(&st);
-
-	mem->sg_list = NULL;
-	mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg)
 {
 	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (mem->sg_list)
+	if (*sg_list)
 		return 0; /* already mapped (for e.g. resume) */
 
-	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
 		goto err;
 
-	mem->sg_list = sg = st.sgl;
+	*sg_list = sg = st.sgl;
 
-	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
-		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
-				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!mem->num_sg))
+	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+			     num_entries, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(!*num_sg))
 		goto err;
 
 	return 0;
@@ -130,15 +118,22 @@ err:
 	sg_free_table(&st);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(intel_gtt_map_memory);
 
-static void intel_agp_unmap_memory(struct agp_memory *mem)
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
-	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+	struct sg_table st;
+
+	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
 
-	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
-		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	intel_agp_free_sglist(mem);
+	pci_unmap_sg(intel_private.pcidev, sg_list,
+		     num_sg, PCI_DMA_BIDIRECTIONAL);
+
+	st.sgl = sg_list;
+	st.orig_nents = st.nents = num_sg;
+
+	sg_free_table(&st);
 }
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
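
intel_gtt_unmap_memory() absorbs the old intel_agp_free_sglist(): a temporary sg_table is rebuilt around the caller's scatterlist so sg_free_table() can release it. The caller keeps the sg_list/num_sg pair that intel_gtt_map_memory() filled in and resets it after unmapping; a hedged sketch (helper name hypothetical):

#include <linux/scatterlist.h>
#include <drm/intel-gtt.h>

/* Hypothetical teardown pairing intel_gtt_map_memory(): unmap, then
 * clear the caller-side bookkeeping so a later map starts fresh. */
static void my_release_dma(struct scatterlist **sg_list, int *num_sg)
{
	if (!*sg_list)
		return; /* nothing mapped */

	intel_gtt_unmap_memory(*sg_list, *num_sg);
	*sg_list = NULL;
	*num_sg = 0;
}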
@@ -307,7 +302,7 @@ static int intel_gtt_setup_scratch_page(void)
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+	if (intel_private.base.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -699,6 +694,8 @@ static int intel_gtt_init(void)
 		return ret;
 	}
 
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
 	return 0;
 }
@@ -892,7 +889,7 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
-static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-					unsigned int sg_len,
-					unsigned int pg_start,
-					unsigned int flags)
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags)
@@ -916,11 +913,25 @@ static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 	}
 	readl(intel_private.gtt+j-1);
 }
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags)
+{
+	int i, j;
+
+	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+		dma_addr_t addr = page_to_phys(pages[i]);
+		intel_private.driver->write_entry(addr,
+						  j, flags);
+	}
+	readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
 {
-	int i, j;
 	int ret = -EINVAL;
 
 	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
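
intel_gtt_insert_pages() is the non-DMAR twin of intel_gtt_insert_sg_entries(): it writes page_to_phys() addresses straight into the GTT and finishes with a posting read of the last entry to flush the writes. GTT entries are page-granular, so a byte offset into the aperture converts to a first_entry index with PAGE_SHIFT; a hedged sketch (helper hypothetical):

#include <linux/mm.h>
#include <drm/intel-gtt.h>

/* Hypothetical direct bind for the non-DMAR path. flags selects the
 * GTT memory/caching type, passed through to the chipset's
 * write_entry hook just as the fake-AGP layer passes its type. */
static void my_bind_at_offset(u64 gtt_offset, struct page **pages,
			      unsigned int num_pages, unsigned int flags)
{
	intel_gtt_insert_pages(gtt_offset >> PAGE_SHIFT, num_pages,
			       pages, flags);
}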
@@ -941,21 +952,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
-		ret = intel_agp_map_memory(mem);
+	if (intel_private.base.needs_dmar) {
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+					   &mem->sg_list, &mem->num_sg);
 		if (ret != 0)
 			return ret;
 
 		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
 					    pg_start, type);
-	} else {
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			dma_addr_t addr = page_to_phys(mem->pages[i]);
-			intel_private.driver->write_entry(addr,
-							  j, type);
-		}
-		readl(intel_private.gtt+j-1);
-	}
+	} else
+		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+				       type);
 
 out:
 	ret = 0;
@@ -964,22 +971,31 @@ out_err:
 	return ret;
 }
 
-static int intel_fake_agp_remove_entries(struct agp_memory *mem,
-					 off_t pg_start, int type)
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 {
-	int i;
+	unsigned int i;
 
-	if (mem->page_count == 0)
-		return 0;
-
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
-		intel_agp_unmap_memory(mem);
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+	for (i = first_entry; i < (first_entry + num_entries); i++) {
 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+					 off_t pg_start, int type)
+{
+	if (mem->page_count == 0)
+		return 0;
+
+	if (intel_private.base.needs_dmar) {
+		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+		mem->sg_list = NULL;
+		mem->num_sg = 0;
+	}
+
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
 	return 0;
 }
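
intel_fake_agp_remove_entries() is now a thin composition of the exported helpers, and a client unbind can mirror it one-to-one; a minimal sketch, assuming the same hypothetical caller-tracked sg state as the bind sketch above:

#include <linux/scatterlist.h>
#include <drm/intel-gtt.h>

/* Hypothetical unbind mirroring intel_fake_agp_remove_entries():
 * release the DMA mapping if one exists, then point every entry in
 * the range back at the scratch page. */
static void my_unbind_pages(unsigned int first_entry, unsigned int num_pages,
			    struct scatterlist **sg_list, int *num_sg)
{
	if (intel_gtt_get()->needs_dmar) {
		intel_gtt_unmap_memory(*sg_list, *num_sg);
		*sg_list = NULL;
		*num_sg = 0;
	}
	intel_gtt_clear_range(first_entry, num_pages);
}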

include/drm/intel-gtt.h

@@ -11,9 +11,21 @@ const struct intel_gtt {
 	/* Part of the gtt that is mappable by the cpu, for those chips where
 	 * this is not the full gtt. */
 	unsigned int gtt_mappable_entries;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
 } *intel_gtt_get(void);
 
 void intel_gtt_chipset_flush(void);
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg);
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags);
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags);
 
 /* Special gtt memory types */
 #define AGP_DCACHE_MEMORY	1
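
With needs_dmar published in struct intel_gtt, a client no longer needs USE_PCI_DMA_API or the chipset generation, neither of which is exported; it reads the decision from the struct returned by intel_gtt_get(). A minimal sketch (names hypothetical):

#include <linux/types.h>
#include <drm/intel-gtt.h>

/* Illustrative: cache the published dmar decision once at init,
 * instead of re-deriving it from chipset internals. */
static bool my_use_dmar;

static void my_gtt_setup(void)
{
	my_use_dmar = intel_gtt_get()->needs_dmar;
}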