lib: provide a simple generic ioremap implementation
A lot of architectures reuse the same simple ioremap implementation, so start lifting the most simple variant to lib/ioremap.c. It provides ioremap_prot and iounmap, plus a default ioremap that uses prot_noncached, although that can be overridden by asm/io.h.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Palmer Dabbelt <palmer@dabbelt.com>
This commit is contained in:
parent
98c90e5ea3
commit
80b0ca98f9
|
@@ -923,9 +923,10 @@ static inline void *phys_to_virt(unsigned long address)
|
|||
* DOC: ioremap() and ioremap_*() variants
|
||||
*
|
||||
* Architectures with an MMU are expected to provide ioremap() and iounmap()
|
||||
* themselves. For NOMMU architectures we provide a default nop-op
|
||||
* implementation that expect that the physical address used for MMIO are
|
||||
* already marked as uncached, and can be used as kernel virtual addresses.
|
||||
* themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
|
||||
* a default nop-op implementation that expect that the physical address used
|
||||
* for MMIO are already marked as uncached, and can be used as kernel virtual
|
||||
* addresses.
|
||||
*
|
||||
* ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
|
||||
* for specific drivers if the architecture choses to implement them. If they
|
||||
|
@@ -946,7 +947,18 @@ static inline void iounmap(void __iomem *addr)
|
|||
{
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_MMU */
|
||||
#elif defined(CONFIG_GENERIC_IOREMAP)
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
|
||||
void iounmap(volatile void __iomem *addr);
|
||||
|
||||
/*
 * Default ioremap(): map @size bytes of MMIO starting at physical
 * address @addr.  Per the commit message, this default maps the range
 * non-cached; architectures selecting GENERIC_IOREMAP may override it
 * via asm/io.h.
 */
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
|
||||
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
|
||||
|
||||
#ifndef ioremap_nocache
|
||||
#define ioremap_nocache ioremap
|
||||
|
|
|
@@ -637,6 +637,9 @@ config STRING_SELFTEST
|
|||
|
||||
endmenu
|
||||
|
||||
config GENERIC_IOREMAP
|
||||
bool
|
||||
|
||||
config GENERIC_LIB_ASHLDI3
|
||||
bool
|
||||
|
||||
|
|
|
@@ -231,3 +231,42 @@ int ioremap_page_range(unsigned long addr,
|
|||
|
||||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GENERIC_IOREMAP
|
||||
/*
 * Generic ioremap: map the physical MMIO range [@addr, @addr + @size)
 * into the kernel's vmalloc area using the caller-supplied page
 * protection bits @prot.  Returns the new kernel virtual address, or
 * NULL on failure.  Undo with iounmap().
 */
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	struct vm_struct *vm;
	unsigned long page_off;
	unsigned long va;
	phys_addr_t end;

	/* Reject zero-sized requests and ranges that wrap around. */
	end = addr + size - 1;
	if (!size || end < addr)
		return NULL;

	/* Round the start down and the length up to whole pages. */
	page_off = addr & ~PAGE_MASK;
	addr -= page_off;
	size = PAGE_ALIGN(size + page_off);

	/* Reserve a matching chunk of kernel virtual address space. */
	vm = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!vm)
		return NULL;
	va = (unsigned long)vm->addr;

	/* Wire the physical range into the reserved virtual area. */
	if (ioremap_page_range(va, va + size, addr, __pgprot(prot))) {
		free_vm_area(vm);
		return NULL;
	}

	/* Hand back the caller's original intra-page offset. */
	return (void __iomem *)(va + page_off);
}
EXPORT_SYMBOL(ioremap_prot);
|
||||
|
||||
/*
 * Tear down a mapping created by ioremap_prot()/ioremap().  The
 * address is rounded down to its page boundary to recover the exact
 * value the vmalloc layer handed out.
 */
void iounmap(volatile void __iomem *addr)
{
	unsigned long start = (unsigned long)addr & PAGE_MASK;

	vunmap((void *)start);
}
EXPORT_SYMBOL(iounmap);
|
||||
#endif /* CONFIG_GENERIC_IOREMAP */
|
||||
|
|
Loading…
Reference in New Issue