mm: page lock use lock bitops
trylock_page and unlock_page open and close a critical section. Hence, we can use the lock bitops to get the desired memory ordering. Also, mark trylock as likely to succeed (and remove the unlikely annotation from callers). Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a978d6f521
commit
8413ac9d8c
|
@@ -311,7 +311,7 @@ static inline void __clear_page_locked(struct page *page)
|
|||
|
||||
static inline int trylock_page(struct page *page)
|
||||
{
|
||||
return !test_and_set_bit(PG_locked, &page->flags);
|
||||
return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
13
mm/filemap.c
13
mm/filemap.c
|
@@ -573,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
|
|||
* mechananism between PageLocked pages and PageWriteback pages is shared.
|
||||
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
|
||||
*
|
||||
* The first mb is necessary to safely close the critical section opened by the
|
||||
* test_and_set_bit() to lock the page; the second mb is necessary to enforce
|
||||
* ordering between the clear_bit and the read of the waitqueue (to avoid SMP
|
||||
* races with a parallel wait_on_page_locked()).
|
||||
* The mb is necessary to enforce ordering between the clear_bit and the read
|
||||
* of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
|
||||
*/
|
||||
void unlock_page(struct page *page)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
if (!test_and_clear_bit(PG_locked, &page->flags))
|
||||
BUG();
|
||||
smp_mb__after_clear_bit();
|
||||
VM_BUG_ON(!PageLocked(page));
|
||||
clear_bit_unlock(PG_locked, &page->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
wake_up_page(page, PG_locked);
|
||||
}
|
||||
EXPORT_SYMBOL(unlock_page);
|
||||
|
|
|
@@ -422,7 +422,7 @@ void free_swap_and_cache(swp_entry_t entry)
|
|||
if (p) {
|
||||
if (swap_entry_free(p, swp_offset(entry)) == 1) {
|
||||
page = find_get_page(&swapper_space, entry.val);
|
||||
if (page && unlikely(!trylock_page(page))) {
|
||||
if (page && !trylock_page(page)) {
|
||||
page_cache_release(page);
|
||||
page = NULL;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue