[IA64] Port OOM changes to ia64_do_page_fault

Commit d065bd810b
(mm: retry page fault when blocking on disk transfer) and
commit 37b23e0525
(x86,mm: make pagefault killable)

The above commits changed the x86 page fault handler to make it
retryable as well as killable.

These changes reduce the mmap_sem hold time, which is crucial
during OOM killer invocation.

Port these changes to ia64.

Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
commit f28fa72914 (parent cfaf025112)
Author:    Kautuk Consul <consul.kautuk@gmail.com>
Date:      2012-06-14 13:11:37 -07:00
Committer: Tony Luck

 1 file changed, 34 insertions(+), 12 deletions(-)
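Before the diff, a minimal userspace sketch of the retry/killable flow
that those two commits introduced and that this patch ports. Everything
here is illustrative: the bit values are not copied from <linux/mm.h>,
and fake_mm_fault() and the fatal_signal variable are stand-ins for the
real handle_mm_fault() and fatal_signal_pending(current).

	#include <stdio.h>

	#define FAULT_FLAG_ALLOW_RETRY	0x1	/* illustrative value */
	#define FAULT_FLAG_KILLABLE	0x2	/* illustrative value */
	#define VM_FAULT_RETRY		0x4	/* illustrative value */

	static int attempts;

	/* Pretend the first attempt blocks on disk I/O: the real fault
	 * path drops mmap_sem itself and reports VM_FAULT_RETRY. */
	static unsigned int fake_mm_fault(unsigned int flags)
	{
		attempts++;
		if (attempts == 1 && (flags & FAULT_FLAG_ALLOW_RETRY))
			return VM_FAULT_RETRY;
		return 0;
	}

	int main(void)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
		unsigned int fault;
		int fatal_signal = 0;	/* stand-in for fatal_signal_pending() */

	retry:
		/* down_read(&mm->mmap_sem) would go here */
		fault = fake_mm_fault(flags);

		/* Killable: if a fatal signal arrived while blocked on I/O,
		 * the lock is already gone, so the handler can simply bail. */
		if ((fault & VM_FAULT_RETRY) && fatal_signal)
			return 0;

		if (fault & VM_FAULT_RETRY) {
			/* Allow only one retry so the handler cannot loop. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			/* No up_read(): the fault path released mmap_sem. */
			goto retry;
		}

		/* up_read(&mm->mmap_sem) would go here */
		printf("fault completed after %d attempt(s)\n", attempts);
		return 0;
	}

Clearing FAULT_FLAG_ALLOW_RETRY matters because a second VM_FAULT_RETRY
would otherwise spin forever; the killable check matters because a task
killed by the OOM killer should stop faulting without retaking mmap_sem.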


--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
 	return pte_present(pte);
 }
 
+#	define VM_READ_BIT	0
+#	define VM_WRITE_BIT	1
+#	define VM_EXEC_BIT	2
+
 void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
 	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
 
-#	define VM_READ_BIT	0
-#	define VM_WRITE_BIT	1
-#	define VM_EXEC_BIT	2
-
 #	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
 	    || (1 << VM_EXEC_BIT) != VM_EXEC)
 #		error File is out of sync with <linux/mm.h>.  Please update.
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
-		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		}
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
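The "no need to up_read()" comment in the retry path relies on a
contract in mm/filemap.c: when the fault path decides to sleep on page
I/O with FAULT_FLAG_ALLOW_RETRY set, it releases mmap_sem itself before
sleeping, and the fault returns VM_FAULT_RETRY. A condensed paraphrase
of that contract follows (simplified from __lock_page_or_retry() of
that era; the helper name is ours and the body is not verbatim kernel
source):

	/* Sketch of the __lock_page_or_retry() contract the retry path
	 * above depends on; simplified, not verbatim kernel source. */
	static int lock_page_or_retry_sketch(struct page *page,
					     struct mm_struct *mm,
					     unsigned int flags)
	{
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* The caller will see VM_FAULT_RETRY, so release
			 * mmap_sem here, then wait for the page -- killably
			 * when FAULT_FLAG_KILLABLE is set, so a fatal signal
			 * (e.g. from the OOM killer) ends the wait early. */
			up_read(&mm->mmap_sem);
			if (flags & FAULT_FLAG_KILLABLE)
				wait_on_page_locked_killable(page);
			else
				wait_on_page_locked(page);
			return 0;	/* lock not taken: fault is retried */
		}
		__lock_page(page);	/* old behaviour: sleep holding mmap_sem */
		return 1;		/* lock taken */
	}

This is why the combination helps OOM handling: a task blocked on page
I/O no longer sits on mmap_sem while it waits, and the killable wait
lets an OOM-killed task die promptly instead of finishing the fault
first.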