powerpc/32s: Split and inline flush_tlb_mm() and flush_tlb_page()

flush_tlb_mm() and flush_tlb_page() currently handle both the
MMU_FTR_HPTE_TABLE (hash MMU) case and the non-hash case.

The non-MMU_FTR_HPTE_TABLE case is trivial: it is only a call
to _tlbie()/_tlbia(), which is not worth a dedicated function.

Make flush_tlb_mm() and flush_tlb_page() hash-specific and call
them from tlbflush.h based on mmu_has_feature(MMU_FTR_HPTE_TABLE).
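
In practice the generic entry points reduce to a feature check in front of
the hash helpers. A minimal sketch of the flush_tlb_mm() wrapper, mirroring
the tlbflush.h hunk below (declarations and the flush_tlb_page() twin elided):

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                hash__flush_tlb_mm(mm);   /* hash MMU: flush through the hash table */
        else
                _tlbia();                 /* no hash table: flush the whole TLB directly */
}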

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/11e932ded41ba6d9b251d89b7afa33cc060d3aa4.1603348103.git.christophe.leroy@csgroup.eu
2 changed files with 22 additions and 15 deletions

--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -6,8 +6,8 @@
 /*
  * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
  */
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void hash__flush_tlb_mm(struct mm_struct *mm);
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                             unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
@@ -22,6 +22,22 @@ static inline void _tlbie(unsigned long address)
 #endif
 void _tlbia(void);
 
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+                hash__flush_tlb_mm(mm);
+        else
+                _tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+                hash__flush_tlb_page(vma, vmaddr);
+        else
+                _tlbie(vmaddr);
+}
+
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                         unsigned long vmaddr)
 {

--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -118,15 +118,10 @@ EXPORT_SYMBOL(flush_tlb_kernel_range);
 /*
  * Flush all the (user) entries for the address space described by mm.
  */
-void flush_tlb_mm(struct mm_struct *mm)
+void hash__flush_tlb_mm(struct mm_struct *mm)
 {
         struct vm_area_struct *mp;
 
-        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-                _tlbia();
-                return;
-        }
-
         /*
          * It is safe to go down the mm's list of vmas when called
          * from dup_mmap, holding mmap_lock. It would also be safe from
@@ -136,23 +131,19 @@ void flush_tlb_mm(struct mm_struct *mm)
         for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 }
-EXPORT_SYMBOL(flush_tlb_mm);
+EXPORT_SYMBOL(hash__flush_tlb_mm);
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
         struct mm_struct *mm;
         pmd_t *pmd;
 
-        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-                _tlbie(vmaddr);
-                return;
-        }
         mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
         pmd = pmd_off(mm, vmaddr);
         if (!pmd_none(*pmd))
                 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
-EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(hash__flush_tlb_page);
 
 /*
  * For each address in the range, find the pte for the address