[PATCH] page invalidation cleanup
Clean up the invalidate code, and use a common function to safely remove
the page from pagecache.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 0fd0e6b05a
parent 5b99cd0eff
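The "common function" the changelog refers to is remove_mapping() in
mm/vmscan.c; its signature appears in the second hunk below, and the first
hunk pulls <linux/swap.h> into mm/truncate.c, presumably for its
declaration. A minimal sketch of the calling convention, with the caller
(try_drop_page) being a hypothetical illustration, not code from this patch:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/swap.h>	/* remove_mapping() */

	/*
	 * Sketch of the contract, as described by the comments in the
	 * hunks below: remove_mapping() takes tree_lock itself, re-checks
	 * that the page is clean and unused (pagecache ref + caller's
	 * ref == 2), and detaches it from the pagecache. It returns
	 * nonzero on success, 0 if the page is busy or dirty.
	 */
	int remove_mapping(struct address_space *mapping, struct page *page);

	/* Hypothetical caller: try to drop a locked pagecache page. */
	static int try_drop_page(struct address_space *mapping,
				 struct page *page)
	{
		/* Filesystem may still hold buffers on the page. */
		if (PagePrivate(page) && !try_to_release_page(page, 0))
			return 0;
		return remove_mapping(mapping, page);
	}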
mm/truncate.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
@@ -52,36 +53,26 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 /*
  * This is for invalidate_inode_pages().  That function can be called at
  * any time, and is not supposed to throw away dirty pages.  But pages can
- * be marked dirty at any time too.  So we re-check the dirtiness inside
- * ->tree_lock.  That provides exclusion against the __set_page_dirty
- * functions.
+ * be marked dirty at any time too, so use remove_mapping which safely
+ * discards clean, unused pages.
  *
  * Returns non-zero if the page was successfully invalidated.
  */
 static int
 invalidate_complete_page(struct address_space *mapping, struct page *page)
 {
+	int ret;
+
 	if (page->mapping != mapping)
 		return 0;
 
 	if (PagePrivate(page) && !try_to_release_page(page, 0))
 		return 0;
 
-	write_lock_irq(&mapping->tree_lock);
-	if (PageDirty(page))
-		goto failed;
-	if (page_count(page) != 2)	/* caller's ref + pagecache ref */
-		goto failed;
-
-	BUG_ON(PagePrivate(page));
-	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	ret = remove_mapping(mapping, page);
 	ClearPageUptodate(page);
-	page_cache_release(page);	/* pagecache ref */
-	return 1;
-failed:
-	write_unlock_irq(&mapping->tree_lock);
-	return 0;
+
+	return ret;
 }
 
 /**
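For reference, invalidate_complete_page() as it reads once the hunk above is
applied (reconstructed from the diff; the inline comment is added here for
explanation and is not part of the patch):

	static int
	invalidate_complete_page(struct address_space *mapping,
				 struct page *page)
	{
		int ret;

		if (page->mapping != mapping)
			return 0;

		if (PagePrivate(page) && !try_to_release_page(page, 0))
			return 0;

		/* remove_mapping() does the locking and dirty/busy checks. */
		ret = remove_mapping(mapping, page);
		ClearPageUptodate(page);

		return ret;
	}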
mm/vmscan.c (27 changed lines)
@@ -384,11 +384,30 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 	BUG_ON(mapping != page_mapping(page));
 
 	write_lock_irq(&mapping->tree_lock);
-
 	/*
-	 * The non-racy check for busy page.  It is critical to check
-	 * PageDirty _after_ making sure that the page is freeable and
-	 * not in use by anybody.  (pagecache + us == 2)
+	 * The non racy check for a busy page.
+	 *
+	 * Must be careful with the order of the tests. When someone has
+	 * a ref to the page, it may be possible that they dirty it then
+	 * drop the reference. So if PageDirty is tested before page_count
+	 * here, then the following race may occur:
+	 *
+	 * get_user_pages(&page);
+	 * [user mapping goes away]
+	 * write_to(page);
+	 *				!PageDirty(page)    [good]
+	 * SetPageDirty(page);
+	 * put_page(page);
+	 *				!page_count(page)   [good, discard it]
+	 *
+	 * [oops, our write_to data is lost]
+	 *
+	 * Reversing the order of the tests ensures such a situation cannot
+	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+	 * load is not satisfied before that of page->_count.
+	 *
+	 * Note that if SetPageDirty is always performed via set_page_dirty,
+	 * and thus under tree_lock, then this ordering is not required.
 	 */
 	if (unlikely(page_count(page) != 2))
 		goto cannot_free;
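The ordering the new comment motivates is partly visible at the end of the
hunk: page_count is tested first, and only afterwards PageDirty. A minimal
sketch of that sequence as it would sit inside remove_mapping(), under
tree_lock; the surrounding function body is assumed, since the hunk shown
here is truncated:

	/* Test the refcount first: a busy page must never be freed. */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	/*
	 * Don't let the page->flags load be satisfied before the
	 * page->_count load, or the race described above can slip by.
	 */
	smp_rmb();
	/* Only now is it safe to trust a clean PageDirty result. */
	if (unlikely(PageDirty(page)))
		goto cannot_free;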