x86: fix ioremap API
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

parent 266b9f8727
commit 5f8681529c

@@ -106,7 +106,7 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 			unsigned long flags)
 {
 	void __iomem *addr;
@@ -164,7 +164,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 
 	return (void __iomem *) (offset + (char __iomem *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
 
 /**
  * ioremap_nocache - map bus memory into CPU space
@@ -193,6 +192,12 @@ void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+{
+	return __ioremap(phys_addr, size, 0);
+}
+EXPORT_SYMBOL(ioremap_cache);
+
 /**
  * iounmap - Free a IO remapping
  * @addr: virtual address from ioremap_*
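
Taken together, the hunks above change the ioremap implementation itself
(presumably arch/x86/mm's ioremap.c, judging by the hunk context):
__ioremap() becomes static and its EXPORT_SYMBOL() is dropped, while
ioremap_cache() is added as a real exported function next to
ioremap_nocache(). A minimal sketch of how a driver would now go through
the exported wrappers instead of the no-longer-visible __ioremap(); the
register block address and size are hypothetical placeholders, not taken
from the patch:

	#include <linux/errno.h>
	#include <linux/io.h>

	/* Hypothetical MMIO register block; address and size are made up. */
	#define EXAMPLE_REG_BASE	0xfed00000UL
	#define EXAMPLE_REG_SIZE	0x1000UL

	static void __iomem *example_regs;

	static int example_map_regs(void)
	{
		/* Device registers want an uncached mapping. */
		example_regs = ioremap_nocache(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
		if (!example_regs)
			return -ENOMEM;
		return 0;
	}

	static void example_unmap_regs(void)
	{
		iounmap(example_regs);
	}
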
@@ -100,8 +100,6 @@ static inline void * phys_to_virt(unsigned long address)
  */
 #define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 
-extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
 /**
  * ioremap - map bus memory into CPU space
  * @offset: bus address of the memory
@@ -116,18 +114,13 @@ extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsign
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
-
-static inline void __iomem *
-ioremap_cache(unsigned long offset, unsigned long size)
-{
-	return __ioremap(offset, size, 0);
-}
+extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }
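
The two hunks above are against the 32-bit io header (presumably
include/asm-x86/io_32.h): the private __ioremap() prototype and the inline
ioremap_cache() wrapper disappear, both ioremap_nocache() and
ioremap_cache() become plain extern declarations, and plain ioremap()
keeps its non-cached default. The cached flavor is meant for memory-like
regions where read caching is safe; a hedged sketch (the ROM window
address and length are placeholders, not from the patch):

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/types.h>

	/* Hypothetical expansion-ROM window; address and length are made up. */
	#define EXAMPLE_ROM_BASE	0x000c0000UL
	#define EXAMPLE_ROM_LEN		0x8000UL

	static int example_copy_rom(u8 *dst)
	{
		void __iomem *rom = ioremap_cache(EXAMPLE_ROM_BASE, EXAMPLE_ROM_LEN);

		if (!rom)
			return -ENOMEM;
		/* Cached reads are fine for ROM contents. */
		memcpy_fromio(dst, rom, EXAMPLE_ROM_LEN);
		iounmap(rom);
		return 0;
	}
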
@@ -150,8 +150,6 @@ static inline void * phys_to_virt(unsigned long address)
 
 #include <asm-generic/iomap.h>
 
-extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
 extern void *early_ioremap(unsigned long addr, unsigned long size);
 extern void early_iounmap(void *addr, unsigned long size);
 
@@ -160,18 +158,13 @@ extern void early_iounmap(void *addr, unsigned long size);
  * it's useful if some control registers are in such an area and write combining
  * or read caching is not desirable:
  */
-extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
-
-static inline void __iomem *
-ioremap_cache(unsigned long offset, unsigned long size)
-{
-	return __ioremap(offset, size, 0);
-}
+extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	return ioremap_cache(offset, size);
 }
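
The final two hunks mirror the same cleanup in the 64-bit header
(presumably include/asm-x86/io_64.h). After this patch both headers
expose the identical trio of ioremap(), ioremap_nocache() and
ioremap_cache() with the same prototypes; the one divergence still
visible in the diff is the default of plain ioremap(), which maps
non-cached on 32-bit but still goes through ioremap_cache() here. A short
sketch of the explicit style the unified API encourages; the helper and
its parameters are illustrative only:

	#include <linux/io.h>
	#include <linux/types.h>

	/*
	 * Because plain ioremap() still differs between 32-bit (non-cached)
	 * and 64-bit (cached) in this tree, portable code should name the
	 * caching policy explicitly rather than rely on the default.
	 */
	static void __iomem *example_map_explicit(unsigned long phys,
						  unsigned long len,
						  bool cacheable)
	{
		return cacheable ? ioremap_cache(phys, len)
				 : ioremap_nocache(phys, len);
	}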