mm/filemap: Add folio_lock_killable()
This is like lock_page_killable() but for use by callers who know they
have a folio. Convert __lock_page_killable() to be
__folio_lock_killable(). This saves one call to compound_head() per
contended call to lock_page_killable().

__folio_lock_killable() is 19 bytes smaller than __lock_page_killable()
was. filemap_fault() shrinks by 74 bytes and __lock_page_or_retry()
shrinks by 71 bytes. That's a total of 164 bytes of text saved.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
commit af7f29d9e1
parent 7c23c782d5
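As a quick illustration of the new API: a caller that already has a folio can
take the lock killably and propagate the error on a fatal signal. This is a
minimal sketch of the calling pattern only; my_touch_folio() is a hypothetical
caller, not part of this commit:

	/* Hypothetical caller: lock the folio, but give up on a fatal signal. */
	static int my_touch_folio(struct folio *folio)
	{
		int err;

		/* Returns 0 on success, -EINTR if a fatal signal arrives. */
		err = folio_lock_killable(folio);
		if (err)
			return err;

		/* ... operate on the locked folio ... */

		folio_unlock(folio);
		return 0;
	}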
include/linux/pagemap.h
@@ -653,7 +653,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 }
 
 void __folio_lock(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+int __folio_lock_killable(struct folio *folio);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
@@ -693,6 +693,14 @@ static inline void lock_page(struct page *page)
 	__folio_lock(folio);
 }
 
+static inline int folio_lock_killable(struct folio *folio)
+{
+	might_sleep();
+	if (!folio_trylock(folio))
+		return __folio_lock_killable(folio);
+	return 0;
+}
+
 /*
  * lock_page_killable is like lock_page but can be interrupted by fatal
  * signals.  It returns 0 if it locked the page and -EINTR if it was
@@ -700,10 +708,7 @@ static inline void lock_page(struct page *page)
  */
 static inline int lock_page_killable(struct page *page)
 {
-	might_sleep();
-	if (!trylock_page(page))
-		return __lock_page_killable(page);
-	return 0;
+	return folio_lock_killable(page_folio(page));
 }
 
 /*
mm/filemap.c (17 lines changed)
@@ -1644,14 +1644,13 @@ void __folio_lock(struct folio *folio)
 }
 EXPORT_SYMBOL(__folio_lock);
 
-int __lock_page_killable(struct page *__page)
+int __folio_lock_killable(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
 					EXCLUSIVE);
 }
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__folio_lock_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
@@ -1693,6 +1692,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
+	struct folio *folio = page_folio(page);
+
 	if (fault_flag_allow_retry_first(flags)) {
 		/*
 		 * CAUTION! In this case, mmap_lock is not released
@@ -1711,13 +1712,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 	if (flags & FAULT_FLAG_KILLABLE) {
 		int ret;
 
-		ret = __lock_page_killable(page);
+		ret = __folio_lock_killable(folio);
 		if (ret) {
 			mmap_read_unlock(mm);
 			return 0;
 		}
 	} else {
-		__folio_lock(page_folio(page));
+		__folio_lock(folio);
 	}
 
 	return 1;
@@ -2929,7 +2930,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(&folio->page)) {
+		if (__folio_lock_killable(folio)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals