mm: thp: optimize memcg charge in khugepaged

We don't need to hold the mmap_sem through mem_cgroup_newpage_charge();
the mmap_sem is only held to keep the vma stable, and we no longer need
the vma to be stable once alloc_hugepage_vma() has returned.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Andrea Arcangeli 2011-05-24 17:12:14 -07:00 committed by Linus Torvalds
parent 9547d01bfb
commit 692e0b3542
1 changed file with 14 additions and 13 deletions
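For reference, a condensed sketch of the resulting flow in collapse_huge_page() on the CONFIG_NUMA path, paraphrased from the patched code in the diff below (the !CONFIG_NUMA branch and surrounding details are omitted; this is not a verbatim excerpt):

	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				      node, __GFP_OTHER_NODE);

	/*
	 * The vma was only needed for the allocation; drop the mmap_sem
	 * read lock before charging the page to the memcg.
	 */
	up_read(&mm->mmap_sem);

	if (unlikely(!new_page)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);	/* charge failed: free the fresh hugepage */
		return;
	}

	/* only now upgrade to mmap_sem write mode for the actual collapse */
	down_write(&mm->mmap_sem);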

mm/huge_memory.c

@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 #ifndef CONFIG_NUMA
+	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 	new_page = *hpage;
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		return;
-	}
 #else
 	VM_BUG_ON(*hpage);
 	/*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
+
+	/*
+	 * After allocating the hugepage, release the mmap_sem read lock in
+	 * preparation for taking it in write mode.
+	 */
+	up_read(&mm->mmap_sem);
 	if (unlikely(!new_page)) {
-		up_read(&mm->mmap_sem);
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
-	count_vm_event(THP_COLLAPSE_ALLOC);
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		put_page(new_page);
-		return;
-	}
 #endif
 
-	/* after allocating the hugepage upgrade to mmap_sem write mode */
-	up_read(&mm->mmap_sem);
+	count_vm_event(THP_COLLAPSE_ALLOC);
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+#ifdef CONFIG_NUMA
+		put_page(new_page);
+#endif
+		return;
+	}
 
 	/*
 	 * Prevent all access to pagetables with the exception of