swiotlb: pass a gfp_mask argument to swiotlb_init_late
Let the caller choose a zone to allocate from. This will be used later on by the xen-swiotlb initialization on arm. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
This commit is contained in:
parent
8ba2ed1be9
commit
742519538e
|
@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
|
|||
int size = STA2X11_SWIOTLB_SIZE;
|
||||
/* First instance: register your own swiotlb area */
|
||||
dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
|
||||
if (swiotlb_init_late(size))
|
||||
if (swiotlb_init_late(size, GFP_DMA))
|
||||
dev_emerg(&pdev->dev, "init swiotlb failed\n");
|
||||
}
|
||||
list_add(&instance->list, &sta2x11_instance_list);
|
||||
|
|
|
@ -37,7 +37,7 @@ struct scatterlist;
|
|||
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
|
||||
unsigned long swiotlb_size_or_default(void);
|
||||
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
|
||||
int swiotlb_init_late(size_t size);
|
||||
int swiotlb_init_late(size_t size, gfp_t gfp_mask);
|
||||
extern void __init swiotlb_update_mem_attributes(void);
|
||||
|
||||
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
|
||||
|
|
|
@ -292,7 +292,7 @@ fail:
|
|||
* initialize the swiotlb later using the slab allocator if needed.
|
||||
* This should be just like above, but with some error catching.
|
||||
*/
|
||||
int swiotlb_init_late(size_t size)
|
||||
int swiotlb_init_late(size_t size, gfp_t gfp_mask)
|
||||
{
|
||||
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
|
||||
unsigned long bytes;
|
||||
|
@ -303,15 +303,12 @@ int swiotlb_init_late(size_t size)
|
|||
if (swiotlb_force_disable)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Get IO TLB memory from the low pages
|
||||
*/
|
||||
order = get_order(nslabs << IO_TLB_SHIFT);
|
||||
nslabs = SLABS_PER_PAGE << order;
|
||||
bytes = nslabs << IO_TLB_SHIFT;
|
||||
|
||||
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
|
||||
vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
|
||||
vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
|
||||
order);
|
||||
if (vstart)
|
||||
break;
|
||||
|
|
Loading…
Reference in New Issue