mm: Convert get_page_unless_zero() to return bool
atomic_add_unless() returns bool, so remove the widening casts to int in
page_ref_add_unless() and get_page_unless_zero().  This causes gcc to
produce slightly larger code in isolate_migratepages_block(), but it's
not clear that it's worse code.  Net +19 bytes of text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent
5816b3e657
commit
c25303281d
|
@@ -754,7 +754,7 @@ static inline int put_page_testzero(struct page *page)
  * This can be called when MMU is off so it must not access
  * any of the virtual mappings.
  */
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
 {
 	return page_ref_add_unless(page, 1, 0);
 }
|
@@ -161,9 +161,9 @@ static inline int page_ref_dec_return(struct page *page)
 	return ret;
 }

-static inline int page_ref_add_unless(struct page *page, int nr, int u)
+static inline bool page_ref_add_unless(struct page *page, int nr, int u)
 {
-	int ret = atomic_add_unless(&page->_refcount, nr, u);
+	bool ret = atomic_add_unless(&page->_refcount, nr, u);

 	if (page_ref_tracepoint_active(page_ref_mod_unless))
 		__page_ref_mod_unless(page, nr, ret);