sh64: Fixup the nommu build.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

commit ccd8058741 (parent 9141d30a48)

@@ -143,12 +143,22 @@ resvec_save_area:
 trap_jtable:
 	.long	do_exception_error		/* 0x000 */
 	.long	do_exception_error		/* 0x020 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x040 */
 	.long	tlb_miss_store			/* 0x060 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	! ARTIFICIAL pseudo-EXPEVT setting
 	.long	do_debug_interrupt		/* 0x080 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x0A0 */
 	.long	tlb_miss_store			/* 0x0C0 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	.long	do_address_error_load		/* 0x0E0 */
 	.long	do_address_error_store		/* 0x100 */
 #ifdef CONFIG_SH_FPU

@@ -185,10 +195,18 @@ trap_jtable:
 	.endr
 	.long	do_IRQ			/* 0xA00 */
 	.long	do_IRQ			/* 0xA20 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xA40 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_IRQ			/* 0xA60 */
 	.long	do_IRQ			/* 0xA80 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xAA0 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_exception_error	/* 0xAC0 */
 	.long	do_address_error_exec	/* 0xAE0 */
 	.rept 8

@@ -274,6 +292,7 @@ not_a_tlb_miss:
 	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
 	 * block making sure the final alignment is correct.
 	 */
+#ifdef CONFIG_MMU
 tlb_miss:
 	synco	/* TAKum03020 (but probably a good idea anyway.) */
 	putcon	SP, KCR1

@@ -377,6 +396,9 @@ fixup_to_invoke_general_handler:
 	getcon	KCR1, SP
 	pta	handle_exception, tr0
 	blink	tr0, ZERO
+#else /* CONFIG_MMU */
+	.balign 256
+#endif
 
 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
    DOES END UP AT VBR+0x600 */

@@ -1103,6 +1125,7 @@ restore_all:
  * fpu_error_or_IRQ? is a helper to deflect to the right cause.
  *
  */
+#ifdef CONFIG_MMU
 tlb_miss_load:
 	or	SP, ZERO, r2
 	or	ZERO, ZERO, r3		/* Read */

@@ -1132,6 +1155,7 @@ call_do_page_fault:
 	movi	do_page_fault, r6
 	ptabs	r6, tr0
 	blink	tr0, ZERO
+#endif /* CONFIG_MMU */
 
 fpu_error_or_IRQA:
 	pta	its_IRQ, tr0

@@ -1481,6 +1505,7 @@ poke_real_address_q:
 	ptabs	LINK, tr0
 	blink	tr0, r63
 
+#ifdef CONFIG_MMU
 /*
  * --- User Access Handling Section
  */

@@ -1604,6 +1629,7 @@ ___clear_user_exit:
 	ptabs	LINK, tr0
 	blink	tr0, ZERO
 
+#endif /* CONFIG_MMU */
 
 /*
  * int __strncpy_from_user(unsigned long __dest, unsigned long __src,

@@ -2014,9 +2040,11 @@ sa_default_restorer:
 	.global	asm_uaccess_start	/* Just a marker */
 asm_uaccess_start:
 
+#ifdef CONFIG_MMU
 	.long	___copy_user1, ___copy_user_exit
 	.long	___copy_user2, ___copy_user_exit
 	.long	___clear_user1, ___clear_user_exit
+#endif
 	.long	___strncpy_from_user1, ___strncpy_from_user_exit
 	.long	___strnlen_user1, ___strnlen_user_exit
 	.long	___get_user_asm_b1, ___get_user_asm_b_exit
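
The assembly hunks above all follow the same shape: trap-table slots that only make sense with an MMU are routed to the generic error handler on nommu. A rough C analogue of that shape (handler bodies and table contents are illustrative only, not taken from entry.S):

/* Hypothetical sketch of the jump-table idea, not the kernel's code. */
typedef void (*trap_handler_t)(void);

static void do_exception_error(void) { /* generic "unexpected trap" handler */ }
static void tlb_miss_load(void)      { /* MMU-only: DTLB refill for a load  */ }
static void tlb_miss_store(void)     { /* MMU-only: DTLB refill for a store */ }

static trap_handler_t trap_table[] = {
	do_exception_error,		/* 0x000 */
	do_exception_error,		/* 0x020 */
#ifdef CONFIG_MMU
	tlb_miss_load,			/* 0x040 */
	tlb_miss_store,			/* 0x060 */
#else
	do_exception_error,		/* no TLB on nommu: treat as an error */
	do_exception_error,
#endif
};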

@@ -2,10 +2,11 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #
 
-obj-y			:= init.o extable_64.o consistent.o
+obj-y			:= init.o consistent.o
 
-mmu-y			:= tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o
+mmu-y			:= tlb-nommu.o pg-nommu.o extable_32.o
+mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
+			   extable_64.o
 
 ifndef CONFIG_CACHE_OFF
 obj-y	+= cache-sh5.o

@@ -714,6 +714,7 @@ void flush_cache_sigtramp(unsigned long vaddr)
 	sh64_icache_inv_current_user_range(vaddr, end);
 }
 
+#ifdef CONFIG_MMU
 /*
  * These *MUST* lie in an area of virtual address space that's otherwise
  * unused.

@@ -830,3 +831,4 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	else
 		sh64_clear_user_page_coloured(to, address);
 }
+#endif

@@ -268,11 +268,6 @@ unsigned long long peek_real_address_q(unsigned long long addr);
 unsigned long long poke_real_address_q(unsigned long long addr,
 				       unsigned long long val);
 
-/* arch/sh/mm/ioremap_64.c */
-unsigned long onchip_remap(unsigned long addr, unsigned long size,
-			   const char *name);
-extern void onchip_unmap(unsigned long vaddr);
-
 #if !defined(CONFIG_MMU)
 #define virt_to_phys(address)	((unsigned long)(address))
 #define phys_to_virt(address)	((void *)(address))

@@ -302,9 +297,16 @@ extern void onchip_unmap(unsigned long vaddr);
 void __iomem *__ioremap(unsigned long offset, unsigned long size,
 			unsigned long flags);
 void __iounmap(void __iomem *addr);
+
+/* arch/sh/mm/ioremap_64.c */
+unsigned long onchip_remap(unsigned long addr, unsigned long size,
+			   const char *name);
+extern void onchip_unmap(unsigned long vaddr);
 #else
 #define __ioremap(offset, size, flags)	((void __iomem *)(offset))
 #define __iounmap(addr)			do { } while (0)
+#define onchip_remap(addr, size, name)	(addr)
+#define onchip_unmap(addr)		do { } while (0)
 #endif /* CONFIG_MMU */
 
 static inline void __iomem *
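
The net effect for callers is that onchip_remap()/onchip_unmap() keep compiling on nommu, with the "remapped" address simply being the physical address. A minimal sketch of a caller, assuming a made-up register block address and only the nommu fallbacks shown above:

/* Illustrative nommu-only sketch: the macros mirror the fallbacks added
 * in the hunk above; the register address and caller are made up. */
#define onchip_remap(addr, size, name)	(addr)			/* identity  */
#define onchip_unmap(addr)		do { } while (0)	/* no-op     */

static unsigned long demo_map_regs(void)
{
	/* With an MMU this would establish a translation; on nommu the
	 * macro hands back the physical address unchanged. */
	unsigned long regs = onchip_remap(0xfe600000UL, 0x1000, "demo");

	/* ... access registers through 'regs' ... */

	onchip_unmap(regs);		/* compiles away entirely */
	return regs;
}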

@@ -27,6 +27,7 @@
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID			0x100
 
+#ifdef CONFIG_MMU
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 #define cpu_context(cpu, mm)	((mm)->context.id[cpu])
 

@@ -38,7 +39,6 @@
  */
 #define MMU_VPN_MASK	0xfffff000
 
-#ifdef CONFIG_MMU
 #if defined(CONFIG_SUPERH32)
 #include "mmu_context_32.h"
 #else

@@ -129,6 +129,8 @@ static inline void switch_mm(struct mm_struct *prev,
 #define destroy_context(mm)		do { } while (0)
 #define set_asid(asid)			do { } while (0)
 #define get_asid()			(0)
+#define cpu_asid(cpu, mm)		({ (void)cpu; 0; })
+#define switch_and_save_asid(asid)	(0)
 #define set_TTB(pgd)			do { } while (0)
 #define get_TTB()			(0)
 #define activate_context(mm,cpu)	do { } while (0)
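
The two added definitions let generic context-switch code evaluate ASIDs unconditionally; on nommu they are just constants. A hedged sketch of that calling pattern (the function and its types are illustrative, not the real switch_mm()):

/* Sketch only: shows why the stubs help generic code build on nommu. */
#define cpu_asid(cpu, mm)		({ (void)(cpu); 0; })
#define switch_and_save_asid(asid)	(0)

struct mm_struct { int dummy; };

static void demo_switch_context(struct mm_struct *next, int cpu)
{
	/* On nommu both expressions are compile-time constants, so this
	 * whole function optimises away -- which is all a processor
	 * without an MMU needs. */
	unsigned long asid = cpu_asid(cpu, next);
	unsigned long old  = switch_and_save_asid(asid);

	(void)asid;
	(void)old;
	(void)next;
}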

@@ -56,6 +56,7 @@ static inline void __flush_tlb_slot(unsigned long long slot)
 	__asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
 }
 
+#ifdef CONFIG_MMU
 /* arch/sh64/mm/tlb.c */
 int sh64_tlb_init(void);
 unsigned long long sh64_next_free_dtlb_entry(void);

@@ -64,6 +65,13 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry);
 void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
 			 unsigned long asid, unsigned long paddr);
 void sh64_teardown_tlb_slot(unsigned long long config_addr);
-
+#else
+#define sh64_tlb_init()					do { } while (0)
+#define sh64_next_free_dtlb_entry()			(0)
+#define sh64_get_wired_dtlb_entry()			(0)
+#define sh64_put_wired_dtlb_entry(entry)		do { } while (0)
+#define sh64_setup_tlb_slot(conf, virt, asid, phys)	do { } while (0)
+#define sh64_teardown_tlb_slot(addr)			do { } while (0)
+#endif /* CONFIG_MMU */
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_TLB_64_H */
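
With the #else branch, code that wires or tears down TLB slots still builds on nommu because every helper collapses to a no-op or to 0. A small illustrative caller, assuming only the stub definitions above (the caller itself is hypothetical):

/* Hypothetical caller, shown only to illustrate that the nommu stubs
 * keep this kind of setup code compiling without an MMU. */
#define sh64_tlb_init()					do { } while (0)
#define sh64_get_wired_dtlb_entry()			(0)
#define sh64_setup_tlb_slot(conf, virt, asid, phys)	do { } while (0)
#define sh64_teardown_tlb_slot(addr)			do { } while (0)

static void demo_wire_mapping(unsigned long virt, unsigned long phys)
{
	unsigned long long slot;

	sh64_tlb_init();
	slot = sh64_get_wired_dtlb_entry();	/* always 0 on nommu */
	sh64_setup_tlb_slot(slot, virt, 0, phys);

	/* ... later ... */
	sh64_teardown_tlb_slot(slot);

	/* The stubs discard their arguments, so silence unused warnings. */
	(void)virt;
	(void)phys;
}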

@@ -274,7 +274,9 @@ struct exception_table_entry
 	unsigned long insn, fixup;
 };
 
+#ifdef CONFIG_MMU
 #define ARCH_HAS_SEARCH_EXTABLE
+#endif
 
 /* Returns 0 if exception not found and fixup.unit otherwise. */
 extern unsigned long search_exception_table(unsigned long addr);
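
ARCH_HAS_SEARCH_EXTABLE is now advertised only when the MMU, and therefore the uaccess fixup machinery, is in play. As background, here is a generic sketch of what a search_exception_table()-style lookup does, following the comment above ("Returns 0 if exception not found and fixup.unit otherwise"); this is illustrative, not the sh64 implementation:

/* Generic illustration of an exception-table lookup; the table bounds
 * and linear scan are hypothetical, not the kernel's implementation. */
struct exception_table_entry {
	unsigned long insn, fixup;
};

static unsigned long demo_search_extable(const struct exception_table_entry *first,
					 const struct exception_table_entry *last,
					 unsigned long addr)
{
	const struct exception_table_entry *e;

	/* If the faulting instruction address matches an entry, return
	 * the fixup address to resume at; otherwise return 0. */
	for (e = first; e <= last; e++)
		if (e->insn == addr)
			return e->fixup;

	return 0;
}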