Merge tag 'arm-memremap-for-v4.7' of git://git.linaro.org/people/ard.biesheuvel/linux-arm into devel-stable
This series wires up the generic memremap() function for ARM in a way that allows it to be used as intended, i.e., without regard for whether the region being mapped is covered by a struct page and/or the linear mapping (lowmem).
commit e31db4c756
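For context, a minimal sketch of the usage model this series enables: a driver that needs an ordinary cacheable mapping of a physical memory region calls memremap()/memunmap() (from <linux/io.h>) instead of ioremap_cache(). The sketch below is not part of this merge; the module wrapper, EXAMPLE_PHYS_BASE and EXAMPLE_SIZE are made-up illustration values, and only the generic memremap() API wired up here is assumed.

/*
 * Hypothetical example module, assuming only the generic
 * memremap()/memunmap() API that this series makes usable on ARM.
 */
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/module.h>

#define EXAMPLE_PHYS_BASE	0x40000000UL	/* made-up address */
#define EXAMPLE_SIZE		SZ_1M		/* made-up size */

static void *example_base;

static int __init example_init(void)
{
	/*
	 * MEMREMAP_WB requests a normal cacheable mapping; after this
	 * series it is satisfied either from the existing linear mapping
	 * (lowmem) or via the new arch_memremap_wb() hook on ARM.
	 */
	example_base = memremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE, MEMREMAP_WB);
	if (!example_base)
		return -ENOMEM;

	/* The result is a plain void *, not __iomem, so memcpy() etc. apply. */
	return 0;
}

static void __exit example_exit(void)
{
	memunmap(example_base);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");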
arch/arm/include/asm/io.h
@@ -392,9 +392,18 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
 #define ioremap ioremap
 #define ioremap_nocache ioremap
 
+/*
+ * Do not use ioremap_cache for mapping memory. Use memremap instead.
+ */
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
 #define ioremap_cache ioremap_cache
 
+/*
+ * Do not use ioremap_cached in new code. Provided for the benefit of
+ * the pxa2xx-flash MTD driver only.
+ */
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
+
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
@@ -402,6 +411,9 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
+#define arch_memremap_wb arch_memremap_wb
+
 /*
  * io{read,write}{16,32}be() macros
  */
arch/arm/mm/ioremap.c
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	}
 
 	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 * Don't allow RAM to be mapped with mismatched attributes - this
+	 * causes problems with ARMv6+
 	 */
-	if (WARN_ON(pfn_valid(pfn)))
+	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
 		return NULL;
 
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -380,11 +381,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+	__alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
 
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
@@ -414,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 			__builtin_return_address(0));
 }
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (__force void *)arch_ioremap_caller(phys_addr, size,
+						   MT_MEMORY_RW,
+						   __builtin_return_address(0));
+}
+
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
arch/arm/mm/nommu.c
@@ -367,11 +367,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+	__alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				    __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
 
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
@@ -380,6 +384,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (void *)phys_addr;
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
drivers/mtd/maps/pxa2xx-flash.c
@@ -71,8 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
 		       info->map.name);
 		return -ENOMEM;
 	}
-	info->map.cached = memremap(info->map.phys, info->map.size,
-				    MEMREMAP_WB);
+	info->map.cached =
+		ioremap_cached(info->map.phys, info->map.size);
 	if (!info->map.cached)
 		printk(KERN_WARNING "Failed to ioremap cached %s\n",
 		       info->map.name);
@@ -111,7 +111,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
 	map_destroy(info->mtd);
 	iounmap(info->map.virt);
 	if (info->map.cached)
-		memunmap(info->map.cached);
+		iounmap(info->map.cached);
 	kfree(info);
 	return 0;
 }
kernel/memremap.c
@@ -27,6 +27,13 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 }
 #endif
 
+#ifndef arch_memremap_wb
+static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
+{
+	return (__force void *)ioremap_cache(offset, size);
+}
+#endif
+
 static void *try_ram_remap(resource_size_t offset, size_t size)
 {
 	unsigned long pfn = PHYS_PFN(offset);
@@ -34,7 +41,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
 	/* In the simple case just return the existing linear address */
 	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
 		return __va(offset);
-	return NULL; /* fallback to ioremap_cache */
+	return NULL; /* fallback to arch_memremap_wb */
 }
 
 /**
@@ -90,7 +97,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
 		if (is_ram == REGION_INTERSECTS)
 			addr = try_ram_remap(offset, size);
 		if (!addr)
-			addr = ioremap_cache(offset, size);
+			addr = arch_memremap_wb(offset, size);
 	}
 
 	/*