powerpc/mm/thp: Make page table walk safe against thp split/collapse
We can prevent a THP split or a hugepage collapse by disabling interrupts: an IPI is sent to all CPUs in the early part of a split/collapse, so the split/collapse cannot make progress while local interrupts are disabled. If the THP is being split, find_linux_pte_or_hugepte() returns NULL, which is fine for all current callers.

We need to be careful if we want to use the returned pte_t pointer outside the irq-disabled region. With a THP split the pfn remains the same, but a hugepage collapse results in a pfn change. There are a few ways to guard against a collapse: take a page reference inside the irq-disabled region, take mmap_sem so that a parallel collapse cannot happen, take the pmd lock, or, as the KVM subsystem does, check for an intervening mmu notifier update using mmu_notifier_retry().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 691e95fd73 (parent dac5657067)
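For illustration, a minimal sketch of the calling convention this patch establishes (the caller example_lookup_pfn() below is hypothetical and not part of the patch): do the lookup and any dereference of the returned pte under local_irq_save()/local_irq_restore(), and treat the result as stale once interrupts are re-enabled unless one of the measures above (page reference, mmap_sem, pmd lock, mmu_notifier_retry()) is also taken.

    #include <linux/mm.h>
    #include <linux/irqflags.h>
    #include <asm/pgtable.h>

    /*
     * Hypothetical caller, for illustration only: the pte returned by
     * __find_linux_pte_or_hugepte() may only be dereferenced while local
     * interrupts stay disabled, since the IPI sent at the start of a THP
     * split/collapse cannot complete until interrupts are re-enabled.
     */
    static unsigned long example_lookup_pfn(struct mm_struct *mm, unsigned long ea)
    {
            unsigned long flags, pfn = 0;
            unsigned int shift;
            pte_t *ptep;

            local_irq_save(flags);
            ptep = __find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
            if (ptep && pte_present(*ptep))
                    pfn = pte_pfn(*ptep);   /* split/collapse is blocked here */
            local_irq_restore(flags);

            /*
             * After local_irq_restore() a collapse may change the mapping, so
             * pfn is only a snapshot unless a page reference was taken above.
             */
            return pfn;
    }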
@@ -247,8 +247,17 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #define pmd_large(pmd)          0
 #define has_transparent_hugepage() 0
 #endif
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-                                 unsigned *shift);
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+                                   unsigned *shift);
+
+static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+                                               unsigned *shift)
+{
+        if (!arch_irqs_disabled()) {
+                pr_info("%s called with irq enabled\n", __func__);
+                dump_stack();
+        }
+        return __find_linux_pte_or_hugepte(pgdir, ea, shift);
+}
 #endif /* __ASSEMBLY__ */

 #endif /* __KERNEL__ */
@@ -334,9 +334,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
         int hugepage_shift;

         /*
-         * We won't find hugepages here, iomem
+         * We won't find hugepages here(this is iomem). Hence we are not
+         * worried about _PAGE_SPLITTING/collapse. Also we will not hit
+         * page table free, because of init_mm.
          */
-        ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
+        ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
         if (!ptep)
                 return token;
         WARN_ON(hugepage_shift);
@@ -71,15 +71,15 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
         vaddr = (unsigned long)PCI_FIX_ADDR(addr);
         if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
                 return NULL;
-
-        ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
-                                         &hugepage_shift);
+        /*
+         * We won't find huge pages here (iomem). Also can't hit
+         * a page table free due to init_mm
+         */
+        ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
+                                           &hugepage_shift);
         if (ptep == NULL)
                 paddr = 0;
         else {
-                /*
-                 * we don't have hugepages backing iomem
-                 */
                 WARN_ON(hugepage_shift);
                 paddr = pte_pfn(*ptep) << PAGE_SHIFT;
         }
@@ -539,12 +539,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         if (!writing && hpte_is_writable(r)) {
                 unsigned int hugepage_shift;
                 pte_t *ptep, pte;
+                unsigned long flags;

                 /*
                  * We need to protect against page table destruction
                  * while looking up and updating the pte.
                  */
-                rcu_read_lock_sched();
+                local_irq_save(flags);
                 ptep = find_linux_pte_or_hugepte(current->mm->pgd,
                                                  hva, &hugepage_shift);
                 if (ptep) {
@@ -553,7 +554,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         if (pte_write(pte))
                                 write_ok = 1;
                 }
-                rcu_read_unlock_sched();
+                local_irq_restore(flags);
         }
 }

@@ -26,11 +26,14 @@ static void *real_vmalloc_addr(void *x)
 {
         unsigned long addr = (unsigned long) x;
         pte_t *p;
-
-        p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
+        /*
+         * assume we don't have huge pages in vmalloc space...
+         * So don't worry about THP collapse/split. Called
+         * Only in realmode, hence won't need irq_save/restore.
+         */
+        p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
         if (!p || !pte_present(*p))
                 return NULL;
-        /* assume we don't have huge pages in vmalloc space... */
         addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
         return __va(addr);
 }
@@ -153,7 +156,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
         pte_t *ptep;
         unsigned int writing;
         unsigned long mmu_seq;
-        unsigned long rcbits;
+        unsigned long rcbits, irq_flags = 0;

         psize = hpte_page_size(pteh, ptel);
         if (!psize)
@@ -189,7 +192,16 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,

         /* Translate to host virtual address */
         hva = __gfn_to_hva_memslot(memslot, gfn);
-        ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+        /*
+         * If we had a page table change after lookup, we would
+         * retry via mmu_notifier_retry.
+         */
+        if (realmode)
+                ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+        else {
+                local_irq_save(irq_flags);
+                ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+        }
         if (ptep) {
                 pte_t pte;
                 unsigned int host_pte_size;
@@ -202,9 +214,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                  * We should always find the guest page size
                  * to <= host page size, if host is using hugepage
                  */
-                if (host_pte_size < psize)
+                if (host_pte_size < psize) {
+                        if (!realmode)
+                                local_irq_restore(irq_flags);
                         return H_PARAMETER;
+                }
                 pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
                 if (pte_present(pte) && !pte_protnone(pte)) {
                         if (writing && !pte_write(pte))
@@ -338,6 +338,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         pte_t *ptep;
         unsigned int wimg = 0;
         pgd_t *pgdir;
+        unsigned long flags;

         /* used to check for invalidations in progress */
         mmu_seq = kvm->mmu_notifier_seq;
@@ -468,14 +469,23 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,


         pgdir = vcpu_e500->vcpu.arch.pgdir;
+        /*
+         * We are just looking at the wimg bits, so we don't
+         * care much about the trans splitting bit.
+         * We are holding kvm->mmu_lock so a notifier invalidate
+         * can't run hence pfn won't change.
+         */
+        local_irq_save(flags);
         ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
         if (ptep) {
                 pte_t pte = READ_ONCE(*ptep);

-                if (pte_present(pte))
+                if (pte_present(pte)) {
                         wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
                                 MAS2_WIMGE_MASK;
-                else {
+                        local_irq_restore(flags);
+                } else {
+                        local_irq_restore(flags);
                         pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
                                            __func__, (long)gfn, pfn);
                         ret = -EINVAL;
@@ -1066,7 +1066,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */

         /* Get PTE and page size from page tables */
-        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
+        ptep = __find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
         if (ptep == NULL || !pte_present(*ptep)) {
                 DBG_LOW(" no PTE !\n");
                 rc = 1;
@@ -109,7 +109,7 @@ int pgd_huge(pgd_t pgd)
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
         /* Only called for hugetlbfs pages, hence can ignore THP */
-        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+        return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }

 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -682,28 +682,35 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
         } while (addr = next, addr != end);
 }

+/*
+ * We are holding mmap_sem, so a parallel huge page collapse cannot run.
+ * To prevent hugepage split, disable irq.
+ */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
         pte_t *ptep;
         struct page *page;
         unsigned shift;
-        unsigned long mask;
+        unsigned long mask, flags;
         /*
          * Transparent hugepages are handled by generic code. We can skip them
          * here.
          */
+        local_irq_save(flags);
         ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

         /* Verify it is a huge page else bail. */
-        if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
+        if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
+                local_irq_restore(flags);
                 return ERR_PTR(-EINVAL);
+        }
         mask = (1UL << shift) - 1;
         page = pte_page(*ptep);
         if (page)
                 page += (address & mask) / PAGE_SIZE;

+        local_irq_restore(flags);
         return page;
 }

@@ -950,9 +957,12 @@ void flush_dcache_icache_hugepage(struct page *page)
  *
  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the page and take a ref on it.
+ * This function needs to be called with interrupts disabled. We use this variant
+ * when we have MSR[EE] = 0 but the paca->soft_enabled = 1
  */

-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+                                   unsigned *shift)
 {
         pgd_t pgd, *pgdp;
         pud_t pud, *pudp;
|
||||||
*shift = pdshift;
|
*shift = pdshift;
|
||||||
return ret_pte;
|
return ret_pte;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
|
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
|
||||||
|
|
||||||
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
|
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
|
||||||
unsigned long end, int write, struct page **pages, int *nr)
|
unsigned long end, int write, struct page **pages, int *nr)
|
||||||
|
|
|
@@ -111,41 +111,45 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
  * interrupt context, so if the access faults, we read the page tables
  * to find which page (if any) is mapped and access it directly.
  */
-static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
+static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
 {
+        int ret = -EFAULT;
         pgd_t *pgdir;
         pte_t *ptep, pte;
         unsigned shift;
         unsigned long addr = (unsigned long) ptr;
         unsigned long offset;
-        unsigned long pfn;
+        unsigned long pfn, flags;
         void *kaddr;

         pgdir = current->mm->pgd;
         if (!pgdir)
                 return -EFAULT;

+        local_irq_save(flags);
         ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
+        if (!ptep)
+                goto err_out;
         if (!shift)
                 shift = PAGE_SHIFT;

         /* align address to page boundary */
         offset = addr & ((1UL << shift) - 1);
-        addr -= offset;

-        if (ptep == NULL)
-                return -EFAULT;
-        pte = *ptep;
+        pte = READ_ONCE(*ptep);
         if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
-                return -EFAULT;
+                goto err_out;
         pfn = pte_pfn(pte);
         if (!page_is_ram(pfn))
-                return -EFAULT;
+                goto err_out;

         /* no highmem to worry about here */
         kaddr = pfn_to_kaddr(pfn);
-        memcpy(ret, kaddr + offset, nb);
-        return 0;
+        memcpy(buf, kaddr + offset, nb);
+        ret = 0;
+err_out:
+        local_irq_restore(flags);
+        return ret;
 }

 static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)