sh: Prevent 64-bit pgprot clobbering across ioremap implementations.

Presently 'flags' gets passed around a lot between the various ioremap
helpers and implementations, which is only 32-bits. In the X2TLB case
we use 64-bit pgprots which presently results in the upper 32 bits being
chopped off (which handily include our read/write/exec permissions).

As such, we convert everything internally to using pgprot_t directly and
simply convert over with pgprot_val() where needed. With this in place,
transparent fixmap utilization for early ioremap works as expected.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
This commit is contained in:
Paul Mundt 2010-01-19 13:34:38 +09:00
parent af1415314a
commit d57d64080d
6 changed files with 41 additions and 33 deletions

View File

@@ -21,6 +1,7 @@
#include <linux/i2c-algo-pca.h> #include <linux/i2c-algo-pca.h>
#include <linux/usb/r8a66597.h> #include <linux/usb/r8a66597.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/io.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <mach/sh7785lcr.h> #include <mach/sh7785lcr.h>
@@ -332,15 +333,14 @@ static void __init sh7785lcr_setup(char **cmdline_p)
pm_power_off = sh7785lcr_power_off; pm_power_off = sh7785lcr_power_off;
/* sm501 DRAM configuration */ /* sm501 DRAM configuration */
sm501_reg = ioremap_fixed(SM107_REG_ADDR, SM501_DRAM_CONTROL, sm501_reg = ioremap_nocache(SM107_REG_ADDR, SM501_DRAM_CONTROL);
PAGE_KERNEL);
if (!sm501_reg) { if (!sm501_reg) {
printk(KERN_ERR "%s: ioremap error.\n", __func__); printk(KERN_ERR "%s: ioremap error.\n", __func__);
return; return;
} }
writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL); writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL);
iounmap_fixed(sm501_reg); iounmap(sm501_reg);
} }
/* Return the board specific boot mode pin configuration */ /* Return the board specific boot mode pin configuration */

View File

@@ -63,7 +63,7 @@ static int __init landisk_devices_setup(void)
/* open I/O area window */ /* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO); paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot); cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot);
if (!cf_ide_base) { if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n"); printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM; return -ENOMEM;

View File

@@ -57,7 +57,7 @@ static int __init lboxre2_devices_setup(void)
paddrbase = virt_to_phys((void*)PA_AREA5_IO); paddrbase = virt_to_phys((void*)PA_AREA5_IO);
psize = PAGE_SIZE; psize = PAGE_SIZE;
prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16); prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16);
cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot.pgprot); cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot);
if (!cf0_io_base) { if (!cf0_io_base) {
printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ ); printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
return -ENOMEM; return -ENOMEM;

View File

@@ -82,7 +82,7 @@ static int __init sh03_devices_setup(void)
/* open I/O area window */ /* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO); paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot); cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot);
if (!cf_ide_base) { if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n"); printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM; return -ENOMEM;

View File

@@ -235,7 +235,7 @@ unsigned long long poke_real_address_q(unsigned long long addr,
*/ */
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
unsigned long flags, void *caller); pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr); void __iounmap(void __iomem *addr);
#ifdef CONFIG_IOREMAP_FIXED #ifdef CONFIG_IOREMAP_FIXED
@@ -254,13 +254,13 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif #endif
static inline void __iomem * static inline void __iomem *
__ioremap(unsigned long offset, unsigned long size, unsigned long flags) __ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
{ {
return __ioremap_caller(offset, size, flags, __builtin_return_address(0)); return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
} }
static inline void __iomem * static inline void __iomem *
__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags) __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
{ {
#ifdef CONFIG_29BIT #ifdef CONFIG_29BIT
unsigned long last_addr = offset + size - 1; unsigned long last_addr = offset + size - 1;
@@ -272,7 +272,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
* mapping must be done by the PMB or by using page tables. * mapping must be done by the PMB or by using page tables.
*/ */
if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
if (unlikely(flags & _PAGE_CACHABLE)) if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
return (void __iomem *)P1SEGADDR(offset); return (void __iomem *)P1SEGADDR(offset);
return (void __iomem *)P2SEGADDR(offset); return (void __iomem *)P2SEGADDR(offset);
@@ -287,7 +287,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
} }
static inline void __iomem * static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
{ {
void __iomem *ret; void __iomem *ret;
@@ -295,30 +295,39 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
if (ret) if (ret)
return ret; return ret;
ret = __ioremap_29bit(offset, size, flags); ret = __ioremap_29bit(offset, size, prot);
if (ret) if (ret)
return ret; return ret;
return __ioremap(offset, size, flags); return __ioremap(offset, size, prot);
} }
#else #else
#define __ioremap(offset, size, flags) ((void __iomem *)(offset)) #define __ioremap(offset, size, prot) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) #define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
#define __iounmap(addr) do { } while (0) #define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
#define ioremap(offset, size) \ static inline void __iomem *
__ioremap_mode((offset), (size), 0) ioremap(unsigned long offset, unsigned long size)
#define ioremap_nocache(offset, size) \ {
__ioremap_mode((offset), (size), 0) return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
#define ioremap_cache(offset, size) \ }
__ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags) \ static inline void __iomem *
__ioremap((offset), (size), (flags)) ioremap_cache(unsigned long offset, unsigned long size)
#define ioremap_prot(offset, size, flags) \ {
__ioremap_mode((offset), (size), (flags)) return __ioremap_mode(offset, size, PAGE_KERNEL);
#define iounmap(addr) \ }
__iounmap((addr))
static inline void __iomem *
ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
{
return __ioremap_mode(offset, size, __pgprot(flags));
}
#define ioremap_nocache ioremap
#define p3_ioremap __ioremap
#define iounmap __iounmap
#define maybebadio(port) \ #define maybebadio(port) \
printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \

View File

@@ -35,11 +35,10 @@
*/ */
void __iomem * __init_refok void __iomem * __init_refok
__ioremap_caller(unsigned long phys_addr, unsigned long size, __ioremap_caller(unsigned long phys_addr, unsigned long size,
unsigned long flags, void *caller) pgprot_t pgprot, void *caller)
{ {
struct vm_struct *area; struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr; unsigned long offset, last_addr, addr, orig_addr;
pgprot_t pgprot;
/* Don't allow wraparound or zero size */ /* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1; last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
* If we can't yet use the regular approach, go the fixmap route. * If we can't yet use the regular approach, go the fixmap route.
*/ */
if (!mem_init_done) if (!mem_init_done)
return ioremap_fixed(phys_addr, size, __pgprot(flags)); return ioremap_fixed(phys_addr, size, pgprot);
/* /*
* Ok, go for it.. * Ok, go for it..
@@ -91,8 +90,9 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
* PMB entries are all pre-faulted. * PMB entries are all pre-faulted.
*/ */
if (unlikely(phys_addr >= P1SEG)) { if (unlikely(phys_addr >= P1SEG)) {
unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); unsigned long mapped;
mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
if (likely(mapped)) { if (likely(mapped)) {
addr += mapped; addr += mapped;
phys_addr += mapped; phys_addr += mapped;
@@ -101,7 +101,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
} }
#endif #endif
pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
if (likely(size)) if (likely(size))
if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
vunmap((void *)orig_addr); vunmap((void *)orig_addr);