tmpfs: insert tmpfs cache pages to inactive list at first
Shaohua Li reported that a parallel file copy on tmpfs can trigger the OOM killer. This is a regression caused by commit 9ff473b9a7 ("vmscan: evict streaming IO first"). Wow, that patch is two years old!

Currently, tmpfs file cache is inserted on the active list first. The insertion not only increases the number of pages on the anon LRU, it also reduces the anon scanning ratio. As a result, vmscan gets totally confused: it scans almost only the file LRU even though the system has plenty of unused tmpfs pages.

Historically, lru_cache_add_active_anon() was used for two reasons:
1) to prioritize shmem pages over regular file cache, and
2) to avoid reclaim priority inversion of used-once pages.

Both motivations are gone now:
(1) We have separate anon and file LRU lists, so inserting on the active list no longer provides that prioritization.
(2) In the past, a single pte access bit caused page activation, so a page inserted on the inactive list with its pte access bit set effectively had higher priority than a page inserted on the active list. That priority inversion could lead to unintended LRU churn, but it was already solved by commit 645747462 ("vmscan: detect mapped file pages used only once"). (Thanks Hannes, you are great!)

Thus, we can now use lru_cache_add_anon() instead.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reported-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
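For illustration, a reproducer along the lines of Shaohua Li's report might look like the sketch below: several processes writing large files into a tmpfs mount in parallel, so that every dirtied page enters the page cache as a fresh tmpfs page. The mount point, file size, and process count are illustrative assumptions, not details from the original report.

/*
 * Hypothetical reproducer sketch: parallel copies into tmpfs.
 * Assumes /dev/shm is a tmpfs mount; sizes and counts are made up.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#define NPROC	8
#define NBLOCKS	(256 * 1024L)	/* 256K blocks x 4 KiB = 1 GiB per file */

int main(void)
{
	static char buf[4096];
	int i;

	memset(buf, 'x', sizeof(buf));

	for (i = 0; i < NPROC; i++) {
		if (fork() == 0) {
			char path[64];
			long b;
			int fd;

			snprintf(path, sizeof(path), "/dev/shm/copy-%d", i);
			fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
			if (fd < 0) {
				perror("open");
				_exit(1);
			}
			/*
			 * Each write allocates a fresh tmpfs page.  Before
			 * this patch, every such page went straight onto the
			 * active anon LRU, skewing reclaim toward scanning
			 * almost only the file LRU.
			 */
			for (b = 0; b < NBLOCKS; b++)
				write(fd, buf, sizeof(buf));
			close(fd);
			_exit(0);
		}
	}
	while (wait(NULL) > 0)
		;
	return 0;
}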
commit e9d6c15738 (parent 1f0a738868)
include/linux/swap.h
@@ -224,21 +224,11 @@ static inline void lru_cache_add_anon(struct page *page)
 	__lru_cache_add(page, LRU_INACTIVE_ANON);
 }
 
-static inline void lru_cache_add_active_anon(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_ANON);
-}
-
 static inline void lru_cache_add_file(struct page *page)
 {
 	__lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
-static inline void lru_cache_add_active_file(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_FILE);
-}
-
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
mm/filemap.c
@@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	/*
 	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
 	 * before shmem_readpage has a chance to mark them as SwapBacked: they
-	 * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+	 * need to go on the anon lru below, and mem_cgroup_cache_charge
 	 * (called in add_to_page_cache) needs to know where they're going too.
 	 */
 	if (mapping_cap_swap_backed(mapping))
@@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	if (page_is_file_cache(page))
 		lru_cache_add_file(page);
 	else
-		lru_cache_add_active_anon(page);
+		lru_cache_add_anon(page);
 	}
 	return ret;
 }
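With both hunks applied, the insertion path for a newly added page-cache page reduces to the sketch below: a simplified view of add_to_page_cache_lru() as it reads after this patch, reconstructed from the diff context above. The radix-tree insertion and mem_cgroup charge happen inside add_to_page_cache() and are not shown.

/*
 * Simplified post-patch view of add_to_page_cache_lru() (mm/filemap.c,
 * circa 2.6.34).  Error handling beyond the ret check is elided.
 */
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	/*
	 * splice_read/readahead can add shmem/tmpfs pages before
	 * shmem_readpage marks them SwapBacked; mark them here so both
	 * the memcg charge and the LRU placement treat them as anon.
	 */
	if (mapping_cap_swap_backed(mapping))
		SetPageSwapBacked(page);

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0) {
		if (page_is_file_cache(page))
			lru_cache_add_file(page);	/* LRU_INACTIVE_FILE */
		else
			lru_cache_add_anon(page);	/* LRU_INACTIVE_ANON: the fix */
	}
	return ret;
}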