powerpc/mm/radix: Change pte relax sequence to handle nest MMU hang
When relaxing access (a read -> read_write update), the pte needs to be marked invalid to work around a nest MMU bug. We also need to do a tlb flush after the pte is marked invalid, before updating it with the new access bits.

We also move the tlb flush into the platform-specific __ptep_set_access_flags(). This will help us get rid of unnecessary tlb flushes on BOOK3S 64 later; we don't do that in this patch. It also helps avoid issuing multiple tlbies when a coprocessor is attached.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit bd5050e38a
parent e4c1112c3f
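Before the hunks, a minimal user-space sketch of the sequence the patch enforces may help. This is not the kernel code: the pte is modelled as a plain atomic word, and tlb_flush() is a hypothetical stub standing in for the real per-page flush (radix__flush_tlb_page_psize() in the radix hunks below, where the atomic updates are done with __radix_pte_update()).

/*
 * Minimal user-space model of the relax sequence described above.
 * The pte is a plain atomic word; tlb_flush() is a hypothetical
 * stub for the real per-page TLB flush.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PTE_VALID	0x1UL
#define PTE_READ	0x2UL
#define PTE_WRITE	0x4UL

static void tlb_flush(void)
{
	/* Stand-in for tlbie + synchronisation on real hardware. */
	printf("flush while pte is invalid\n");
}

static void relax_access(atomic_ulong *ptep, unsigned long set)
{
	/* 1. Atomically clear the pte so no MMU (CPU or nest) can
	 *    start a new walk from it. */
	unsigned long old = atomic_exchange(ptep, 0);

	/* 2. Flush stale translations while the pte is invalid;
	 *    skipping this step is what hangs the nest MMU. */
	tlb_flush();

	/* 3. Publish the old bits plus the relaxed permission. */
	atomic_store(ptep, old | set);
}

int main(void)
{
	atomic_ulong pte = PTE_VALID | PTE_READ;

	relax_access(&pte, PTE_WRITE);	/* read -> read_write */
	printf("pte = %#lx\n", atomic_load(&pte));
	return 0;
}

In the kernel, the same three steps appear in radix__ptep_set_access_flags(), guarded so the full invalidate-flush-rewrite dance only runs when POWER9 DD1 or an attached coprocessor requires it; otherwise a single pte update followed by a flush suffices.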
@@ -245,6 +245,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
 
 	pte_update(ptep, clr, set);
+
+	flush_tlb_page(vma, address);
 }
 
 #define __HAVE_ARCH_PTE_SAME
@@ -266,6 +266,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 	unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
 
 	pte_update(ptep, clr, set);
+
+	flush_tlb_page(vma, address);
 }
 
 static inline int pte_young(pte_t pte)
@@ -304,6 +304,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 	unsigned long old = pte_val(*ptep);
 	*ptep = __pte(old | bits);
 #endif
+
+	flush_tlb_page(vma, address);
 }
 
 #define __HAVE_ARCH_PTE_SAME
@@ -8,6 +8,7 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+#include <asm/tlbflush.h>
 
 struct mm_struct;
 
@@ -52,7 +52,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 		 */
 		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
 					pmd_pte(entry), address, MMU_PAGE_2M);
-		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
@@ -1091,8 +1091,12 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
 					      _PAGE_RW | _PAGE_EXEC);
-
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+	/*
+	 * To avoid NMMU hang while relaxing access, we need mark
+	 * the pte invalid in between.
+	 */
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
+	    atomic_read(&mm->context.copros) > 0) {
 		unsigned long old_pte, new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
@@ -1100,9 +1104,11 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 		 * new value of pte
 		 */
 		new_pte = old_pte | set;
-		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
+		radix__flush_tlb_page_psize(mm, address, psize);
 		__radix_pte_update(ptep, 0, new_pte);
-	} else
+	} else {
 		__radix_pte_update(ptep, 0, set);
+		radix__flush_tlb_page_psize(mm, address, psize);
+	}
 	asm volatile("ptesync" : : : "memory");
 }
@@ -224,7 +224,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 		assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(vma, ptep, entry,
 					address, mmu_virtual_psize);
-		flush_tlb_page(vma, address);
 	}
 	return changed;
 }
@@ -263,7 +262,6 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 		assert_spin_locked(&vma->vm_mm->page_table_lock);
 #endif
 		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
-		flush_hugetlb_page(vma, addr);
 	}
 	return changed;
 #endif