commit 9144f3821d
Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm:
  [ARM] xsc3: fix xsc3_l2_inv_range
  [ARM] mm: fix page table initialization
  [ARM] fix naming of MODULE_START / MODULE_END
  ARM: OMAP: Fix define for twl4030 irqs
  ARM: OMAP: Fix get_irqnr_and_base to clear spurious interrupt bits
  ARM: OMAP: Fix debugfs_create_*'s error checking method for arm/plat-omap
  ARM: OMAP: Fix compiler warnings in gpmc.c
  [ARM] fix VFP+softfloat binaries
@@ -44,10 +44,10 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULE_END	(PAGE_OFFSET)
-#define MODULE_START	(MODULE_END - 16*1048576)
+#define MODULES_END	(PAGE_OFFSET)
+#define MODULES_VADDR	(MODULES_END - 16*1048576)
 
-#if TASK_SIZE > MODULE_START
+#if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
 
@@ -56,7 +56,7 @@
  * Since we use sections to map it, this macro replaces the physical address
  * with its virtual address while keeping offset from the base section.
  */
-#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
+#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
 /*
  * Allow 16MB-aligned ioremap pages
@@ -94,8 +94,8 @@
 /*
  * The module can be at any place in ram in nommu mode.
  */
-#define MODULE_END	(END_MEM)
-#define MODULE_START	(PHYS_OFFSET)
+#define MODULES_END	(END_MEM)
+#define MODULES_VADDR	(PHYS_OFFSET)
 
 #endif /* !CONFIG_MMU */
 
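Note on the rename: MODULES_VADDR/MODULES_END is the naming that other architectures and generic code (see the mm/vmalloc.c hunk at the end of this diff) already expect. A minimal sketch of testing an address against the module area under the CONFIG_MMU layout above; the PAGE_OFFSET value here is only an example, it is configuration-dependent:

    /* Sketch: classify an address against the ARM module area.
     * PAGE_OFFSET is an assumption for the demo (common default).
     */
    #define PAGE_OFFSET    0xC0000000UL
    #define MODULES_END    (PAGE_OFFSET)
    #define MODULES_VADDR  (MODULES_END - 16*1048576)  /* 16MB window */

    static int in_module_area(unsigned long addr)
    {
        return addr >= MODULES_VADDR && addr < MODULES_END;
    }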
@@ -42,6 +42,10 @@
 #define CR_U	(1 << 22)	/* Unaligned access operation		*/
 #define CR_XP	(1 << 23)	/* Extended page tables			*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts			*/
+#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
+#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
+#define CR_AFE	(1 << 29)	/* Access flag enable			*/
+#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
 
 /*
  * This is used to ensure the compiler did actually allocate the register we
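Note: the new CR_TRE bit is what build_mem_type_table() (in the mmu.c hunks below) tests, via `cr & CR_TRE`, to choose between TEX-remap and classic TEX/C/B device encodings. A sketch of probing the control register, assuming the kernel's get_cr() helper, which reads CP15 register c1:

    /* Sketch: test SCTLR feature bits via a get_cr()-style helper. */
    #define CR_XP   (1 << 23)   /* Extended page tables */
    #define CR_TRE  (1 << 28)   /* TEX remap enable */

    static inline unsigned int get_cr(void)
    {
        unsigned int val;
        asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (val) : : "cc");
        return val;
    }

    static int tex_remap_enabled(void)
    {
        return (get_cr() & CR_TRE) != 0;
    }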
@@ -21,12 +21,16 @@ int elf_check_arch(const struct elf32_hdr *x)
 
 	eflags = x->e_flags;
 	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+		unsigned int flt_fmt;
+
 		/* APCS26 is only allowed if the CPU supports it */
 		if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
 			return 0;
 
+		flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
+
 		/* VFP requires the supporting code */
-		if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP))
+		if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
 			return 0;
 	}
 	return 1;
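Note on the VFP+softfloat fix: the old test rejected any binary carrying EF_ARM_VFP_FLOAT on a VFP-less machine, even when EF_ARM_SOFT_FLOAT was also set; masking both flags first means only a pure VFP float format is refused. A demo of the two predicates, with flag values believed to match the ARM ELF headers (treat them as illustrative and check elf.h):

    #include <stdio.h>

    #define EF_ARM_SOFT_FLOAT  0x200   /* believed values; check elf.h */
    #define EF_ARM_VFP_FLOAT   0x400
    #define HWCAP_VFP          (1 << 6)

    static int old_check(unsigned eflags, unsigned hwcap)
    {
        return !((eflags & EF_ARM_VFP_FLOAT) && !(hwcap & HWCAP_VFP));
    }

    static int new_check(unsigned eflags, unsigned hwcap)
    {
        unsigned flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
        return !(flt_fmt == EF_ARM_VFP_FLOAT && !(hwcap & HWCAP_VFP));
    }

    int main(void)
    {
        /* soft-float binary that also carries the VFP flag, no VFP hw */
        unsigned eflags = EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT, hwcap = 0;
        printf("old: %d  new: %d\n", old_check(eflags, hwcap),
               new_check(eflags, hwcap));   /* prints "old: 0  new: 1" */
        return 0;
    }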
@@ -26,12 +26,12 @@
 /*
  * The XIP kernel text is mapped in the module area for modules and
  * some other stuff to work without any indirect relocations.
- * MODULE_START is redefined here and not in asm/memory.h to avoid
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
  * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
  */
 extern void _etext;
-#undef MODULE_START
-#define MODULE_START	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
+#undef MODULES_VADDR
+#define MODULES_VADDR	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
 #endif
 
 #ifdef CONFIG_MMU
@@ -43,7 +43,7 @@ void *module_alloc(unsigned long size)
 	if (!size)
 		return NULL;
 
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
+	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
 	if (!area)
 		return NULL;
 
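Note: module_alloc() draws from [MODULES_VADDR, MODULES_END), the 16MB window just below PAGE_OFFSET, which keeps modules within direct branch range of the kernel text (the memory.h comment above: "within 32MB of the kernel text", matching the ±32MB reach of ARM `bl`). A sketch of that reach check; the addresses are assumptions for the demo:

    /* Sketch: why the module window sits just below the kernel text. */
    #include <stdio.h>
    #include <stdlib.h>

    #define BL_REACH  (32UL * 1024 * 1024)   /* ARM 'bl' reach: +/-32MB */

    static int in_bl_range(unsigned long from, unsigned long to)
    {
        return labs((long)(to - from)) < (long)BL_REACH;
    }

    int main(void)
    {
        unsigned long mod_site   = 0xBF000000UL;  /* MODULES_VADDR, example */
        unsigned long kernel_sym = 0xC0008000UL;  /* kernel text, example */
        printf("%d\n", in_bl_range(mod_site, kernel_sym));  /* prints 1 */
        return 0;
    }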
@@ -429,18 +429,16 @@ void __init gpmc_init(void)
 	gpmc_l3_clk = clk_get(NULL, ck);
 	if (IS_ERR(gpmc_l3_clk)) {
 		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
-		return -ENODEV;
+		BUG();
 	}
 
 	gpmc_base = ioremap(l, SZ_4K);
 	if (!gpmc_base) {
 		clk_put(gpmc_l3_clk);
 		printk(KERN_ERR "Could not get GPMC register memory\n");
-		return -ENOMEM;
+		BUG();
 	}
 
-	BUG_ON(IS_ERR(gpmc_l3_clk));
-
 	l = gpmc_read_reg(GPMC_REVISION);
 	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
 	/* Set smart idle mode and automatic L3 clock gating */
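Note: gpmc_init() is declared void (see the hunk header), so `return -ENODEV;` produced "'return' with a value, in function returning void" warnings; the fix replaces the returns with BUG() and drops the now-redundant BUG_ON(). A minimal reproduction of the pattern:

    /* The warning the fix removes: a value returned from a void function. */
    void broken(void)
    {
        return -19;  /* warning: 'return' with a value, in function returning void */
    }

    /* The fix: a void init function signals fatal failure by aborting. */
    void fixed(void)
    {
        __builtin_trap();  /* stand-in for the kernel's BUG() */
    }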
@@ -98,7 +98,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Clean and invalidate partial last cache line.
 	 */
-	if (end & (CACHE_LINE_SIZE - 1)) {
+	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
 		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
 		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
 		end &= ~(CACHE_LINE_SIZE - 1);
@@ -107,7 +107,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	while (start != end) {
+	while (start < end) {
 		xsc3_l2_inv_pa(start);
 		start += CACHE_LINE_SIZE;
 	}
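Note on the xsc3_l2_inv_range fix: earlier in the function (not visible in these hunks) a partial first cache line is cleaned and `start` is rounded up. For a range contained in a single cache line that pushes `start` past `end`; rounding `end` down then leaves `end < start`, and `while (start != end)` would walk nearly the whole address space invalidating live lines. The `<` comparisons make both steps safe. A worked example, assuming a 32-byte line:

    #include <stdio.h>

    #define CACHE_LINE_SIZE 32UL   /* assumption for the demo */

    int main(void)
    {
        unsigned long start = 0x1004, end = 0x101c;  /* inside one line */

        /* Partial first line: round start up (as the full function does). */
        if (start & (CACHE_LINE_SIZE - 1))
            start = (start | (CACHE_LINE_SIZE - 1)) + 1;  /* -> 0x1020 */

        /* Partial last line: round end down. */
        if (end & (CACHE_LINE_SIZE - 1))
            end &= ~(CACHE_LINE_SIZE - 1);                /* -> 0x1000 */

        printf("start=%#lx end=%#lx\n", start, end);
        /* 'while (start != end)' would now loop ~2^32/32 times;
         * 'while (start < end)' does not execute at all. */
        return 0;
    }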
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
@@ -205,7 +205,7 @@ static struct mem_type mem_types[] = {
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
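Note: the fixed XN/TEX/bufferable bits come out of the static initializers because no single encoding is valid on every CPU; build_mem_type_table() (next hunks) now ORs the correct per-CPU bits in at boot. A sketch of that pattern, with bit positions believed to match asm/pgtable-hwdef.h (TEX at bits 14:12, XN at bit 4):

    /* Conservative baseline in the static table, CPU-specific bits
     * OR'd in at boot.  Bit positions assumed from pgtable-hwdef.h.
     */
    #define PMD_TYPE_SECT       (2 << 0)
    #define PMD_SECT_XN         (1 << 4)
    #define PMD_SECT_AP_WRITE   (1 << 10)
    #define PMD_SECT_TEX(x)     ((unsigned long)(x) << 12)

    static unsigned long prot_sect_device = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

    static void fixup_for_armv6(void)
    {
        prot_sect_device |= PMD_SECT_XN;      /* device memory: never execute */
        prot_sect_device |= PMD_SECT_TEX(2);  /* nonshared device encoding */
    }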
@@ -273,22 +273,23 @@ static void __init build_mem_type_table(void)
 #endif
 
 	/*
-	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
-	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
-	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 * Strip out features not present on earlier architectures.
+	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+	 * without extended page tables don't have the 'Shared' bit.
 	 */
-	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-	}
+	if (cpu_arch < CPU_ARCH_ARMv5)
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_S;
 
 	/*
-	 * ARMv5 and lower, bit 4 must be set for page tables.
-	 * (was: cache "update-able on write" bit on ARM610)
-	 * However, Xscale cores require this bit to be cleared.
+	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+	 * "update-able on write" bit on ARM610).  However, Xscale and
+	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale()) {
+	if (cpu_is_xscale() || cpu_is_xsc3()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +303,64 @@ static void __init build_mem_type_table(void)
 		}
 	}
 
+	/*
+	 * Mark the device areas according to the CPU/architecture.
+	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance).
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
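Note: the "TEXCB" strings in the new comments name the TEX[2:0], C, and B bits of a first-level section descriptor ("SXCB" adds the Shared bit for the TEX-remap case). Mapping the comment notation onto descriptor bits, with positions assumed from asm/pgtable-hwdef.h (TEX at bits 14:12, C at bit 3, B at bit 2):

    /* Decode "TEXCB" comment notation into section-descriptor bits. */
    #define PMD_SECT_BUFFERABLE  (1 << 2)   /* B */
    #define PMD_SECT_CACHEABLE   (1 << 3)   /* C */
    #define PMD_SECT_TEX(x)      ((unsigned long)(x) << 12)

    /* "write combine device mem is TEXCB=00100" => TEX=001, C=0, B=0 */
    static const unsigned long wc_texcb = PMD_SECT_TEX(1);

    /* "shared device is TEXCB=00101" (xsc3) => TEX=001, C=0, B=1 */
    static const unsigned long xsc3_shared_texcb =
            PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE;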
@@ -317,12 +376,8 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent()) {
-		if (cpu_is_xsc3()) {
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		}
-	}
+	if (arch_is_coherent() && cpu_is_xsc3())
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -336,11 +391,6 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-		/*
-		 * Mark the device area as "shared device"
-		 */
-		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
 		/*
 		 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +410,6 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch < CPU_ARCH_ARMv5)
-		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +701,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	/*
 	 * Clear out all the mappings below the kernel image.
 	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
@@ -766,7 +813,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
+	map.virtual = MODULES_VADDR;
 	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
@@ -115,7 +115,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	orr	r3, r3, r2
 	orr	r3, r3, #PTE_EXT_AP0 | 2
 
-	tst	r2, #1 << 4
+	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
 	tst	r1, #L_PTE_WRITE
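Note: cpu_v7_set_pte_ext implements set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext), so under the ARM calling convention r0 is the pte pointer, r1 the Linux pte value, and r2 the ext argument; the bit-4 test belongs on the pte in r1, not on ext in r2. A C model of the fixed step (the TEX bit position is an assumption taken from pgtable-hwdef.h):

    /* C model of 'tst r1, #1 << 4 / orrne r3, r3, #PTE_EXT_TEX(1)'. */
    #define PTE_EXT_TEX(x)  ((unsigned long)(x) << 6)  /* assumed position */

    static unsigned long set_tex_bit(unsigned long hw, unsigned long pte)
    {
        if (pte & (1 << 4))   /* was wrongly tested on the ext argument */
            hw |= PTE_EXT_TEX(1);
        return hw;
    }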
@@ -192,11 +192,11 @@ __v7_setup:
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-	/*
-	 *          V X F   I D LR
-	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
-	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *          0 110       0011 1.00 .111 1101 < we want
-	 */
+	/*   AT
+	 *  TFR   EV X F   I D LR
+	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *    1    0 110       0011 1.00 .111 1101 < we want
+	 */
 	.type	v7_crval, #object
 v7_crval:
@@ -428,23 +428,23 @@ static int clk_debugfs_register_one(struct clk *c)
 	if (c->id != 0)
 		sprintf(p, ":%d", c->id);
 	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
-	if (IS_ERR(d))
-		return PTR_ERR(d);
+	if (!d)
+		return -ENOMEM;
 	c->dent = d;
 
 	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
-	if (IS_ERR(d)) {
-		err = PTR_ERR(d);
+	if (!d) {
+		err = -ENOMEM;
 		goto err_out;
 	}
 	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
-	if (IS_ERR(d)) {
-		err = PTR_ERR(d);
+	if (!d) {
+		err = -ENOMEM;
 		goto err_out;
 	}
 	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
-	if (IS_ERR(d)) {
-		err = PTR_ERR(d);
+	if (!d) {
+		err = -ENOMEM;
 		goto err_out;
 	}
 	return 0;
@@ -483,8 +483,8 @@ static int __init clk_debugfs_init(void)
 	int err;
 
 	d = debugfs_create_dir("clock", NULL);
-	if (IS_ERR(d))
-		return PTR_ERR(d);
+	if (!d)
+		return -ENOMEM;
 	clk_debugfs_root = d;
 
 	list_for_each_entry(c, &clocks, node) {
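Note on the debugfs fix: in this era of the kernel, debugfs_create_*() return NULL on failure and use an ERR_PTR only to signal that debugfs itself is not compiled in, so IS_ERR()/PTR_ERR() let real creation failures through as valid-looking pointers. A sketch of the contract-correct caller:

    /* Error checking matching this era's debugfs contract:
     * NULL on failure; ERR_PTR(-ENODEV) only when debugfs is disabled.
     */
    static struct dentry *clk_debugfs_root;

    static int register_clock_dir(void)
    {
        struct dentry *d = debugfs_create_dir("clock", NULL);

        if (!d)               /* creation failed */
            return -ENOMEM;
        clk_debugfs_root = d;
        return 0;
    }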
@@ -65,7 +65,8 @@
 #include <mach/omap34xx.h>
 #endif
 
-#define INTCPS_SIR_IRQ_OFFSET	0x0040		/* Active interrupt number */
+#define INTCPS_SIR_IRQ_OFFSET	0x0040		/* Active interrupt offset */
+#define	ACTIVEIRQ_MASK		0x7f		/* Active interrupt bits */
 
 		.macro	disable_fiq
 		.endm
@@ -88,6 +89,7 @@
 		cmp	\irqnr, #0x0
 2222:
 		ldrne	\irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
+		and	\irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
 
 		.endm
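Note: the INTC's SIR_IRQ register carries the active interrupt number in its low bits, and (per the commit intent) the remaining bits flag spurious interrupts; without the mask, a spurious interrupt would hand a wildly out-of-range IRQ number to the generic handler. A C rendering of the macro's read-and-mask step:

    /* C rendering of get_irqnr_and_base's read+mask step. */
    #define INTCPS_SIR_IRQ_OFFSET  0x0040
    #define ACTIVEIRQ_MASK         0x7f

    static unsigned int read_active_irq(volatile unsigned int *intc_base)
    {
        unsigned int irqnr = intc_base[INTCPS_SIR_IRQ_OFFSET / 4];

        return irqnr & ACTIVEIRQ_MASK;  /* clear spurious-interrupt bits */
    }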
@@ -372,7 +372,7 @@
 
 /* External TWL4030 gpio interrupts are optional */
 #define TWL4030_GPIO_IRQ_BASE	TWL4030_PWR_IRQ_END
-#ifdef CONFIG_TWL4030_GPIO
+#ifdef CONFIG_GPIO_TWL4030
 #define TWL4030_GPIO_NR_IRQS	18
 #else
 #define	TWL4030_GPIO_NR_IRQS	0
@@ -178,7 +178,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
-	 * x86-64 and sparc64 put modules in a special place,
+	 * ARM, x86-64 and sparc64 put modules in a special place,
 	 * and fall back on vmalloc() if that fails.  Others
 	 * just put it in the vmalloc space.
 	 */
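Note: this comment changes because the rename earlier in the diff gives ARM the MODULES_VADDR/MODULES_END names that the generic helper keys off, so it now recognizes ARM's dedicated module area too. A plausible shape of the helper body (a reconstruction, not a verbatim quote of mm/vmalloc.c):

    /* Module-area addresses are recognized only when the arch
     * defines MODULES_VADDR (sketch of the generic helper).
     */
    static inline int is_vmalloc_or_module_addr(const void *x)
    {
    #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
        unsigned long addr = (unsigned long)x;

        if (addr >= MODULES_VADDR && addr < MODULES_END)
            return 1;
    #endif
        return is_vmalloc_addr(x);
    }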