Fix race in create_empty_buffers() vs __set_page_dirty_buffers()

Nick Piggin noticed this (very unlikely) race between setting a page
dirty and creating the buffers for it: we need to hold the mapping's
private_lock until we've set the page dirty bit, to make sure that
create_empty_buffers() cannot build up a set of buffers with clean
dirty bits while the page itself is dirty.  (A minimal user-space
model of this ordering is sketched below, just before the diff.)

I doubt anybody has ever hit this race (and it didn't solve the issue
Nick was looking at), but as Nick says: "Still, it does appear to solve
a real race, which we should close."

Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Linus Torvalds
Date:   2009-03-19 11:32:05 -07:00
Parent: 68df3755e3
Commit: a8e7d49aa7
1 changed file with 11 additions and 12 deletions
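
To make the ordering concrete, here is a minimal user-space model of the
race, assuming nothing beyond POSIX threads; it is an illustration only,
not kernel code.  A mutex stands in for mapping->private_lock, plain
booleans stand in for the PageDirty bit and the buffers' dirty state, and
every name in it (dirty_page_old, dirty_page_fixed, create_buffers) is
invented for this sketch.  dirty_page_old() mirrors the pre-patch ordering,
where the page is marked dirty only after private_lock has been dropped;
dirty_page_fixed() mirrors the patched ordering.

/* Toy model of the __set_page_dirty_buffers() vs create_empty_buffers()
 * race -- user-space sketch, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t private_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_dirty;        /* models the PageDirty bit        */
static bool page_has_bufs;     /* models page_has_buffers()       */
static bool bufs_dirty;        /* models the buffers' dirty bits  */

/* Pre-patch ordering: PageDirty is set after private_lock is dropped. */
static void dirty_page_old(void)
{
        pthread_mutex_lock(&private_lock);
        if (page_has_bufs)
                bufs_dirty = true;      /* dirty any existing buffers */
        pthread_mutex_unlock(&private_lock);
        /* window: create_buffers() can run here and see a clean page */
        page_dirty = true;
}

/* Patched ordering: PageDirty is set while private_lock is still held. */
static void dirty_page_fixed(void)
{
        pthread_mutex_lock(&private_lock);
        if (page_has_bufs)
                bufs_dirty = true;
        page_dirty = true;              /* moved inside the lock */
        pthread_mutex_unlock(&private_lock);
}

/* Models create_empty_buffers(): attach buffers and inherit PageDirty,
 * all under private_lock. */
static void *create_buffers(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&private_lock);
        page_has_bufs = true;
        if (page_dirty)
                bufs_dirty = true;
        pthread_mutex_unlock(&private_lock);
        return NULL;
}

int main(int argc, char **argv)
{
        pthread_t t;
        /* Pass any argument to exercise the old (racy) ordering instead. */
        void (*dirty_page)(void) = argc > 1 ? dirty_page_old : dirty_page_fixed;

        (void)argv;
        pthread_create(&t, NULL, create_buffers, NULL);
        dirty_page();
        pthread_join(&t, NULL);
        printf("page dirty=%d, buffers dirty=%d\n", page_dirty, bufs_dirty);
        return 0;
}

Build with something like "cc -pthread race-model.c" (the file name is
arbitrary).  With dirty_page_old(), a schedule in which create_buffers()
runs inside the marked window ends with page_dirty set but bufs_dirty
clear -- a dirty page whose buffers are all clean, which is exactly the
state the patch makes impossible.  With dirty_page_fixed() the two updates
are ordered by private_lock, so that state can no longer occur.  Whether
the bad interleaving is actually hit in a given run depends on scheduling;
the model only shows where the window is.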


@@ -760,15 +760,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  */
-static int __set_page_dirty(struct page *page,
+static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
 {
-       if (unlikely(!mapping))
-               return !TestSetPageDirty(page);
-
-       if (TestSetPageDirty(page))
-               return 0;
-
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -785,8 +779,6 @@ static int __set_page_dirty(struct page *page,
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-
-       return 1;
 }
 
 /*
@@ -816,6 +808,7 @@ static int __set_page_dirty(struct page *page,
  */
 int __set_page_dirty_buffers(struct page *page)
 {
+       int newly_dirty;
        struct address_space *mapping = page_mapping(page);
 
        if (unlikely(!mapping))
@@ -831,9 +824,12 @@ int __set_page_dirty_buffers(struct page *page)
                        bh = bh->b_this_page;
                } while (bh != head);
        }
+       newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);
 
-       return __set_page_dirty(page, mapping, 1);
+       if (newly_dirty)
+               __set_page_dirty(page, mapping, 1);
+       return newly_dirty;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -1262,8 +1258,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
                        return;
        }
 
-       if (!test_set_buffer_dirty(bh))
-               __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
+       if (!test_set_buffer_dirty(bh)) {
+               struct page *page = bh->b_page;
+               if (!TestSetPageDirty(page))
+                       __set_page_dirty(page, page_mapping(page), 0);
+       }
 }
 
 /*