powerpc/mm: Simplify update_mmu_cache() on BOOK3S32
On BOOK3S32, hash_preload() uses neither is_exec nor trap, so drop those parameters and simplify update_mmu_cache(). Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/35f143c6fe29f9fd25c7f3cd4448ae401029ce3c.1565933217.git.christophe.leroy@c-s.fr
This commit is contained in:
parent
e5a1edb9fe
commit
f49f4e2b68
|
@@ -297,8 +297,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
|
|||
/*
|
||||
* Preload a translation in the hash table
|
||||
*/
|
||||
void hash_preload(struct mm_struct *mm, unsigned long ea,
|
||||
bool is_exec, unsigned long trap)
|
||||
void hash_preload(struct mm_struct *mm, unsigned long ea)
|
||||
{
|
||||
pmd_t *pmd;
|
||||
|
||||
|
@@ -324,35 +323,20 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
|
|||
* We don't need to worry about _PAGE_PRESENT here because we are
|
||||
* called with either mm->page_table_lock held or ptl lock held
|
||||
*/
|
||||
unsigned long trap;
|
||||
bool is_exec;
|
||||
|
||||
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
|
||||
if (!pte_young(*ptep) || address >= TASK_SIZE)
|
||||
return;
|
||||
|
||||
/*
|
||||
* We try to figure out if we are coming from an instruction
|
||||
* access fault and pass that down to __hash_page so we avoid
|
||||
* double-faulting on execution of fresh text. We have to test
|
||||
* for regs NULL since init will get here first thing at boot.
|
||||
*
|
||||
* We also avoid filling the hash if not coming from a fault.
|
||||
*/
|
||||
|
||||
trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
|
||||
switch (trap) {
|
||||
case 0x300:
|
||||
is_exec = false;
|
||||
break;
|
||||
case 0x400:
|
||||
is_exec = true;
|
||||
break;
|
||||
default:
|
||||
/* We have to test for regs NULL since init will get here first thing at boot */
|
||||
if (!current->thread.regs)
|
||||
return;
|
||||
}
|
||||
|
||||
hash_preload(vma->vm_mm, address, is_exec, trap);
|
||||
/* We also avoid filling the hash if not coming from a fault */
|
||||
if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
|
||||
return;
|
||||
|
||||
hash_preload(vma->vm_mm, address);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@@ -91,8 +91,7 @@ void print_system_hash_info(void);
|
|||
|
||||
#ifdef CONFIG_PPC32
|
||||
|
||||
void hash_preload(struct mm_struct *mm, unsigned long ea,
|
||||
bool is_exec, unsigned long trap);
|
||||
void hash_preload(struct mm_struct *mm, unsigned long ea);
|
||||
|
||||
extern void mapin_ram(void);
|
||||
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
|
||||
|
|
|
@@ -252,7 +252,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
|
|||
map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
if (ktext)
|
||||
hash_preload(&init_mm, v, false, 0x300);
|
||||
hash_preload(&init_mm, v);
|
||||
#endif
|
||||
v += PAGE_SIZE;
|
||||
p += PAGE_SIZE;
|
||||
|
|
Loading…
Reference in New Issue