ARM: memremap: implement arch_memremap_wb()
The generic memremap() falls back to using ioremap_cache() to create
MEMREMAP_WB mappings if the requested region is not already covered by the
linear mapping, unless the architecture provides an implementation of
arch_memremap_wb(). Since ioremap_cache() is not appropriate on ARM to map
memory with the same attributes used for the linear mapping, implement
arch_memremap_wb() which does exactly that. Also, relax the WARN() check
to allow MT_MEMORY_RW mappings of pfn_valid() pages.

Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
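For context, the generic fallback path being overridden here looks roughly
like the following. This is a simplified sketch of the kernel/memremap.c
logic of that era, not the verbatim source; region_is_system_ram() is a
stand-in name for the kernel's actual RAM-intersection test
(region_intersects()).

/*
 * Simplified sketch of the generic MEMREMAP_WB path in kernel/memremap.c;
 * error handling and the other MEMREMAP_* flags are elided.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	void *addr = NULL;

	if (flags & MEMREMAP_WB) {
		/*
		 * A region already covered by the linear mapping can be
		 * handed out directly as __va(offset).
		 */
		if (region_is_system_ram(offset, size))	/* stand-in name */
			addr = try_ram_remap(offset, size);
		/*
		 * Anything else goes through the architecture hook, which
		 * defaults to ioremap_cache() when the architecture does
		 * not provide arch_memremap_wb(); that default is the
		 * case this commit fixes for ARM.
		 */
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}
	return addr;
}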
commit 9ab9e4fce4
parent c269cba35b
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
@@ -392,6 +392,9 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
 #define ioremap ioremap
 #define ioremap_nocache ioremap
 
+/*
+ * Do not use ioremap_cache for mapping memory. Use memremap instead.
+ */
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
 #define ioremap_cache ioremap_cache
 
@@ -408,6 +411,9 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
+#define arch_memremap_wb arch_memremap_wb
+
 /*
  * io{read,write}{16,32}be() macros
  */
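The self-referential #define is the usual kernel idiom for signalling an
architecture override to generic code: kernel/memremap.c can then compile
its ioremap_cache()-based default only when no arch definition is visible.
A sketch of the consuming side, assuming the standard #ifndef pattern:

/* Generic side (sketch): only built when the architecture has not
 * defined arch_memremap_wb in its asm/io.h. */
#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	/* Default fallback; not attribute-compatible with the linear
	 * mapping on ARM, which is why ARM now overrides it. */
	return (__force void *)ioremap_cache(offset, size);
}
#endif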
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	}
 
 	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 * Don't allow RAM to be mapped with mismatched attributes - this
+	 * causes problems with ARMv6+
 	 */
-	if (WARN_ON(pfn_valid(pfn)))
+	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
 		return NULL;
 
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -418,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 			__builtin_return_address(0));
 }
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (__force void *)arch_ioremap_caller(phys_addr, size,
+						   MT_MEMORY_RW,
+						   __builtin_return_address(0));
+}
+
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
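MT_MEMORY_RW is the memory type the ARM linear mapping itself uses
(normal, writeback-cacheable memory), so the alias created here carries
matching attributes, which is exactly what the relaxed WARN_ON() above now
permits. Compare ioremap_cache(), which maps with a device memory type;
quoted from memory from the same file, so treat it as illustrative:

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	/* Cached, but as *device* memory (MT_DEVICE_CACHED), not the
	 * normal-memory type used by the linear mapping. */
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}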
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
@@ -384,6 +384,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (void *)phys_addr;
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
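On !MMU kernels physical addresses are directly dereferenceable, hence the
bare cast above. With both variants in place, a driver wanting a cacheable
mapping of memory outside the linear map can simply use memremap(). A
hypothetical usage sketch; the address and size below are made up for
illustration:

#include <linux/io.h>
#include <linux/sizes.h>

#define EXAMPLE_PHYS	0x80000000UL	/* hypothetical reserved RAM */
#define EXAMPLE_SIZE	SZ_1M		/* hypothetical region size */

static void *example_map(void)
{
	/* On ARM this now routes through arch_memremap_wb() when the
	 * region is not covered by the linear mapping. */
	void *virt = memremap(EXAMPLE_PHYS, EXAMPLE_SIZE, MEMREMAP_WB);

	if (!virt)
		return NULL;
	/* Use as ordinary cacheable memory; release with memunmap(virt). */
	return virt;
}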