mm: run the fault-around code under the VMA lock
The map_pages fs method should be safe to run under the VMA lock instead
of the mmap lock.  This should have a measurable reduction in contention
on the mmap lock.

Link: https://lkml.kernel.org/r/20230724185410.1124082-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 61a4b8d320
commit f5617ffeb4

 mm/memory.c | 10 +++++-----
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4533,11 +4533,6 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	struct folio *folio;
 
-	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		vma_end_read(vmf->vma);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * Let's call ->map_pages() first and use ->fault() as fallback
 	 * if page by the offset is not ready to be mapped (cold cache or
@@ -4549,6 +4544,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 			return ret;
 	}
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vmf->vma);
+		return VM_FAULT_RETRY;
+	}
+
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
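For readability, here is a sketch of how do_read_fault() reads once this patch is applied, reconstructed from the two hunks above.  The fault-around call that sits between the hunks is not part of the quoted context, so it is summarized as a comment; treat this as an illustration of the new ordering, not a verbatim copy of the upstream function.

static vm_fault_t do_read_fault(struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	struct folio *folio;

	/*
	 * Let's call ->map_pages() first and use ->fault() as fallback
	 * if page by the offset is not ready to be mapped (cold cache or
	 * something).
	 */
	/*
	 * ... fault-around path (between the two hunks, not quoted above)
	 * runs here, now also under the VMA lock; if it maps the page it
	 * returns early ...
	 */

	/*
	 * Only the ->fault() fallback still needs the mmap lock, so the
	 * VMA-lock check moves down to this point and asks for a retry.
	 */
	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		vma_end_read(vmf->vma);
		return VM_FAULT_RETRY;
	}

	ret = __do_fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	/* ... the rest of the function is unchanged by this patch ... */
}

The net effect is that ->map_pages() (the fault-around path) now runs while holding only the per-VMA lock, and only a fault that has to call into the filesystem's ->fault() handler drops the VMA lock and retries under the mmap lock, which is where the reduction in mmap lock contention described in the changelog comes from.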