mm/khugepaged: fix crashes due to misaccounted holes
Huge tmpfs testing on a shortish file mapped into a pmd-rounded extent hit shmem_evict_inode()'s WARN_ON(inode->i_blocks) followed by clear_inode()'s BUG_ON(inode->i_data.nrpages) when the file was later closed and unlinked.

khugepaged's collapse_shmem() was forgetting to update mapping->nrpages on the rollback path, after it had added but then needs to undo some holes.

There is indeed an irritating asymmetry between shmem_charge(), whose callers want it to increment nrpages after successfully accounting blocks, and shmem_uncharge(), when __delete_from_page_cache() already decremented nrpages itself: oh well, just add a comment on that to them both.

And shmem_recalc_inode() is supposed to be called when the accounting is expected to be in balance (so it can deduce from imbalance that reclaim discarded some pages): so change shmem_charge() to update nrpages earlier (though it's rare for the difference to matter at all).

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261523450.2275@eggly.anvils
Fixes: 800d8c63b2 ("shmem: add huge pages support")
Fixes: f3f0e1d215 ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org>	[4.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 701270fa19
commit aaa52e3400
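For context: the two checks named in the message fire at inode eviction time. A minimal sketch of where they sit (abridged; the exact surrounding code varies by kernel version):

	/* mm/shmem.c */
	static void shmem_evict_inode(struct inode *inode)
	{
		...
		WARN_ON(inode->i_blocks);	/* the leaked nrpages count kept
						   shmem_recalc_inode() from ever
						   draining the last blocks */
		clear_inode(inode);
	}

	/* fs/inode.c */
	void clear_inode(struct inode *inode)
	{
		...
		BUG_ON(inode->i_data.nrpages);	/* nr_none was never subtracted */
		...
	}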
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1506,9 +1506,12 @@ xa_unlocked:
 		khugepaged_pages_collapsed++;
 	} else {
 		struct page *page;
+
 		/* Something went wrong: roll back page cache changes */
-		shmem_uncharge(mapping->host, nr_none);
 		xas_lock_irq(&xas);
+		mapping->nrpages -= nr_none;
+		shmem_uncharge(mapping->host, nr_none);
+
 		xas_set(&xas, start);
 		xas_for_each(&xas, page, end - 1) {
 			page = list_first_entry_or_null(&pagelist,
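To see why the rollback must now subtract nr_none, recall how the holes got counted in the first place. A paraphrase (not verbatim) of collapse_shmem()'s hole handling:

	/* paraphrased from collapse_shmem(): charging a hole up front */
	if (!page) {
		/* a hole: charge it as if a page were already present */
		if (!shmem_charge(mapping->host, 1)) {
			result = SCAN_FAIL;
			break;
		}
		/* the slot is then pointed at the new huge page */
		nr_none++;
		continue;
	}

Each shmem_charge() bumps both the block accounting and mapping->nrpages (see the mm/shmem.c hunk below), so a failed collapse must bring both back down by nr_none; before this fix, only the blocks were uncharged.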
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, long pages)
 	if (!shmem_inode_acct_block(inode, pages))
 		return false;
 
+	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
+	inode->i_mapping->nrpages += pages;
+
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
 	shmem_recalc_inode(inode);
 	spin_unlock_irqrestore(&info->lock, flags);
-	inode->i_mapping->nrpages += pages;
 
 	return true;
 }
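The comment just added refers to shmem_recalc_inode(), which treats any imbalance between what was charged and what is still present as pages lost to reclaim. Abridged from the mm/shmem.c of this era:

	static void shmem_recalc_inode(struct inode *inode)
	{
		struct shmem_inode_info *info = SHMEM_I(inode);
		long freed;

		/* charged pages neither in cache nor in swap were reclaimed */
		freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
		if (freed > 0) {
			info->alloced -= freed;
			inode->i_blocks -= freed * BLOCKS_PER_PAGE;
			shmem_inode_unacct_blocks(inode, freed);
		}
	}

Hence the reordering: if nrpages were still short of the pages just charged, this calculation would mistake them for reclaimed pages and uncharge them straight away.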
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode, long pages)
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	unsigned long flags;
 
+	/* nrpages adjustment done by __delete_from_page_cache() or caller */
+
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced -= pages;
 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;