mm: move mem_cgroup_uncharge out of __page_cache_release()
A later patch makes the THP deferred split shrinker memcg aware, but the
shrinker needs page->mem_cgroup information in the THP destructor, which
currently runs after mem_cgroup_uncharge().

So move mem_cgroup_uncharge() from __page_cache_release() to the compound
page destructor, which is called for both THP and other compound pages,
HugeTLB excepted.  For order-0 pages, call it in __put_single_page()
instead.

Link: http://lkml.kernel.org/r/1565144277-36240-3-git-send-email-yang.shi@linux.alibaba.com
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Suggested-by: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7ae88534cd
parent 364c1eebe4
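For context, the THP destructor that will need page->mem_cgroup looks
roughly like this once the follow-up patch lands.  This is an illustrative
sketch condensed from mm/huge_memory.c of that series, not part of this
diff; get_deferred_split_queue() is the memcg-aware lookup the later patch
introduces:

	/*
	 * Sketch only: the THP destructor after the deferred split
	 * shrinker becomes memcg aware.  get_deferred_split_queue()
	 * selects the split queue via page->mem_cgroup, so the page
	 * must still be charged when the destructor runs.
	 */
	void free_transhuge_page(struct page *page)
	{
		struct deferred_split *ds_queue = get_deferred_split_queue(page);
		unsigned long flags;

		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
		if (!list_empty(page_deferred_list(page))) {
			ds_queue->split_queue_len--;
			list_del(page_deferred_list(page));
		}
		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
		free_compound_page(page);	/* uncharges, then frees */
	}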
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -670,6 +670,7 @@ out:
 
 void free_compound_page(struct page *page)
 {
+	mem_cgroup_uncharge(page);
 	__free_pages_ok(page, compound_order(page));
 }
 
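free_compound_page() is the default compound destructor; it is reached
from put_page() roughly as sketched below (simplified from the mm/swap.c
release path of this era, device-page handling omitted):

	/* Simplified sketch of the release dispatch, for orientation. */
	static void __put_compound_page(struct page *page)
	{
		compound_page_dtor *dtor;

		/* hugetlb pages never have PageLRU set, so skip this */
		if (!PageHuge(page))
			__page_cache_release(page);
		dtor = get_compound_page_dtor(page);
		(*dtor)(page);		/* e.g. free_compound_page() */
	}

	void __put_page(struct page *page)
	{
		if (unlikely(PageCompound(page)))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}

Because the destructor is the last per-page callback to run, doing the
uncharge there keeps page->mem_cgroup valid for free_transhuge_page()
above.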
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -71,12 +71,12 @@ static void __page_cache_release(struct page *page)
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 	}
 	__ClearPageWaiters(page);
-	mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
+	mem_cgroup_uncharge(page);
 	free_unref_page(page);
 }
 
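The uncharge moves out of __page_cache_release() because that helper is
shared with the compound path; the resulting order of operations on both
paths is, schematically:

	/*
	 * Resulting teardown order (illustration, not new code):
	 *
	 *   order-0:   __put_single_page()
	 *                __page_cache_release()  - LRU/waiters teardown
	 *                mem_cgroup_uncharge()   - moved here by this patch
	 *                free_unref_page()
	 *
	 *   compound:  __put_compound_page()
	 *                __page_cache_release()  - no uncharge here anymore
	 *                (*dtor)(page)           - destructor uncharges, frees
	 */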
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1487,10 +1487,9 @@ free_it:
 		 * Is there need to periodically free_page_list? It would
 		 * appear not as the counts should be low
 		 */
-		if (unlikely(PageTransHuge(page))) {
-			mem_cgroup_uncharge(page);
+		if (unlikely(PageTransHuge(page)))
 			(*get_compound_page_dtor(page))(page);
-		} else
+		else
 			list_add(&page->lru, &free_pages);
 
 		continue;
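The explicit uncharge is no longer needed here because the destructor
invoked through get_compound_page_dtor() now performs it.  Order-0 pages
collected on free_pages are unaffected; they are still uncharged in bulk
at the end of the function, roughly (condensed from mm/vmscan.c of this
era):

	/* tail of shrink_page_list(), unchanged by this patch */
	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_unref_page_list(&free_pages);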
@@ -1911,7 +1910,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 
 		if (unlikely(PageCompound(page))) {
 			spin_unlock_irq(&pgdat->lru_lock);
-			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
 			spin_lock_irq(&pgdat->lru_lock);
 		} else