diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index 24dd4ab33dec..8557548fc53e 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -108,6 +108,17 @@ static struct expevt_lookup expevt_lookup_table = {
 	.is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
 };
 
+static inline unsigned int
+expevt_to_fault_code(unsigned long expevt)
+{
+	if (expevt == 0xa40)
+		return FAULT_CODE_ITLB;
+	else if (expevt == 0x060)
+		return FAULT_CODE_WRITE;
+
+	return 0;
+}
+
 /* This routine handles page faults that can be serviced just by refilling a
    TLB entry from an existing page table entry.  (This case represents a very
@@ -123,6 +134,7 @@ do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
 	unsigned long long protection_flags;
 	unsigned long long index;
 	unsigned long long expevt4;
+	unsigned int fault_code;
 
 	/* The next few lines implement a way of hashing EXPEVT into a
 	 * small array index which can be used to lookup parameters
@@ -139,10 +151,16 @@ do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
 	index = expevt4 ^ (expevt4 >> 5);
 	index &= 7;
 
+	fault_code = expevt_to_fault_code(expevt);
+
 	protection_flags = expevt_lookup_table.protection_flags[index];
 
 	if (expevt_lookup_table.is_text_access[index])
-		set_thread_fault_code(FAULT_CODE_ITLB);
+		fault_code |= FAULT_CODE_ITLB;
+	if (!ssr_md)
+		fault_code |= FAULT_CODE_USER;
+
+	set_thread_fault_code(fault_code);
 
 	return handle_tlbmiss(protection_flags, address);
 }
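
In short, the patch maps the hardware EXPEVT exception code to a fault-code bitmask, ORs in FAULT_CODE_ITLB for instruction-side accesses and FAULT_CODE_USER when the saved SR.MD bit is clear (fault taken from user mode), and stashes the result with set_thread_fault_code() for the slow path. The standalone sketch below only illustrates that bit accumulation outside the kernel; the FAULT_CODE_* bit values are assumed placeholders (the real definitions live in arch/sh/include/asm/thread_info.h), and main() here merely stands in for do_fast_page_fault().

/*
 * Illustrative sketch only, not part of the patch.
 * The FAULT_CODE_* values are assumptions for demonstration.
 */
#include <stdio.h>

#define FAULT_CODE_WRITE  (1U << 0)   /* assumed bit: write access */
#define FAULT_CODE_ITLB   (1U << 2)   /* assumed bit: ITLB miss */
#define FAULT_CODE_USER   (1U << 4)   /* assumed bit: fault from user mode */

/* Mirrors the patch's expevt_to_fault_code(): translate the EXPEVT
 * exception code into an initial fault code. */
static unsigned int expevt_to_fault_code(unsigned long expevt)
{
	if (expevt == 0xa40)		/* mapped to FAULT_CODE_ITLB in the patch */
		return FAULT_CODE_ITLB;
	else if (expevt == 0x060)	/* mapped to FAULT_CODE_WRITE in the patch */
		return FAULT_CODE_WRITE;

	return 0;
}

int main(void)
{
	unsigned long expevt = 0x060;	/* example: write fault */
	int ssr_md = 0;			/* 0 => fault taken in user mode */
	unsigned int fault_code = expevt_to_fault_code(expevt);

	if (!ssr_md)
		fault_code |= FAULT_CODE_USER;

	/* The patch would now store this via set_thread_fault_code(). */
	printf("fault_code = %#x\n", fault_code);
	return 0;
}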