Merge tag 'mm-hotfixes-stable-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more hotfixes from Andrew Morton:
 "Seventeen hotfixes. Mostly memory management things. Ten patches are
  cc:stable, addressing pre-6.0 issues"

* tag 'mm-hotfixes-stable-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  .mailmap: update Luca Ceresoli's e-mail address
  mm/mprotect: only reference swap pfn page if type match
  squashfs: don't call kmalloc in decompressors
  mm/damon/dbgfs: avoid duplicate context directory creation
  mailmap: update email address for Colin King
  asm-generic: sections: refactor memory_intersects
  bootmem: remove the vmemmap pages from kmemleak in put_page_bootmem
  ocfs2: fix freeing uninitialized resource on ocfs2_dlm_shutdown
  Revert "memcg: cleanup racy sum avoidance code"
  mm/zsmalloc: do not attempt to free IS_ERR handle
  binder_alloc: add missing mmap_lock calls when using the VMA
  mm: re-allow pinning of zero pfns (again)
  vmcoreinfo: add kallsyms_num_syms symbol
  mailmap: update Guilherme G. Piccoli's email addresses
  writeback: avoid use-after-free after removing device
  shmem: update folio if shmem_replace_page() updates the page
  mm/hugetlb: avoid corrupting page->mapping in hugetlb_mcopy_atomic_pte
commit b467192ec7

.mailmap
@@ -98,8 +98,7 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
 Christian Marangi <ansuelsmth@gmail.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
-Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
-Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com>
+Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
@@ -150,6 +149,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
 Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
 Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
@@ -253,6 +254,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
+Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
drivers/android/binder_alloc.c

@@ -402,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;

+       mmap_read_lock(alloc->vma_vm_mm);
        if (!binder_alloc_get_vma(alloc)) {
+               mmap_read_unlock(alloc->vma_vm_mm);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }
+       mmap_read_unlock(alloc->vma_vm_mm);

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));
@@ -929,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
         * Make sure the binder_alloc is fully initialized, otherwise we might
         * read inconsistent state.
         */
-       if (binder_alloc_get_vma(alloc) != NULL) {
-               for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-                       page = &alloc->pages[i];
-                       if (!page->page_ptr)
-                               free++;
-                       else if (list_empty(&page->lru))
-                               active++;
-                       else
-                               lru++;
-               }
+
+       mmap_read_lock(alloc->vma_vm_mm);
+       if (binder_alloc_get_vma(alloc) == NULL) {
+               mmap_read_unlock(alloc->vma_vm_mm);
+               goto uninitialized;
        }
+
+       mmap_read_unlock(alloc->vma_vm_mm);
+       for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+               page = &alloc->pages[i];
+               if (!page->page_ptr)
+                       free++;
+               else if (list_empty(&page->lru))
+                       active++;
+               else
+                       lru++;
+       }
+
+uninitialized:
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
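The fix above brackets every binder_alloc_get_vma() lookup with mmap_read_lock()/mmap_read_unlock() so the VMA cannot be torn down while it is being examined, and it drops the lock on the early-error path as well. A minimal userspace sketch of the same take-lock, check, bail-out-early pattern, using a pthread rwlock in place of the mmap lock (all names here are illustrative, not binder APIs):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a structure whose cached pointer may vanish. */
struct alloc_state {
        pthread_rwlock_t lock;   /* plays the role of mmap_lock */
        void *vma;               /* plays the role of the cached VMA */
};

/* Check state that another thread may clear; hold the read lock across the
 * check and release it on every path, including the error path. */
static int use_vma_locked(struct alloc_state *a)
{
        pthread_rwlock_rdlock(&a->lock);
        if (!a->vma) {
                pthread_rwlock_unlock(&a->lock);   /* early exit unlocks too */
                return -ESRCH;
        }
        /* ... safe to look at a->vma here ... */
        pthread_rwlock_unlock(&a->lock);
        return 0;
}

int main(void)
{
        struct alloc_state a = { .vma = NULL };

        pthread_rwlock_init(&a.lock, NULL);
        printf("lookup with no vma: %d\n", use_vma_locked(&a));
        a.vma = &a;
        printf("lookup with vma:    %d\n", use_vma_locked(&a));
        pthread_rwlock_destroy(&a.lock);
        return 0;
}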
fs/fs-writeback.c

@@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,

 static void wb_wakeup(struct bdi_writeback *wb)
 {
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                mod_delayed_work(bdi_wq, &wb->dwork, 0);
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 }

 static void finish_writeback_work(struct bdi_writeback *wb,
@@ -164,7 +164,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
        if (work->done)
                atomic_inc(&work->done->cnt);

-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);

        if (test_bit(WB_registered, &wb->state)) {
                list_add_tail(&work->list, &wb->work_list);
@@ -172,7 +172,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
        } else
                finish_writeback_work(wb, work);

-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 }

 /**
@@ -2082,13 +2082,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 {
        struct wb_writeback_work *work = NULL;

-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (!list_empty(&wb->work_list)) {
                work = list_entry(wb->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
        return work;
 }
fs/ocfs2/dlmglue.c

@@ -3403,10 +3403,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
        ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
        ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);

-       ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
-       osb->cconn = NULL;
+       if (osb->cconn) {
+               ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+               osb->cconn = NULL;

-       ocfs2_dlm_shutdown_debug(osb);
+               ocfs2_dlm_shutdown_debug(osb);
+       }
 }

 static int ocfs2_drop_lock(struct ocfs2_super *osb,
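The shutdown path now tears down the cluster connection only when one was actually established, and clears the pointer so a later or repeated call is a no-op. A small sketch of this guard-and-NULL idiom for idempotent cleanup (hypothetical names, not ocfs2 APIs):

#include <stdio.h>
#include <stdlib.h>

struct conn { int id; };

struct super_state {
        struct conn *cconn;   /* NULL until a connection is established */
};

/* Safe on a partially initialized superblock and safe to call twice:
 * resources are released only if they exist, and the pointer is cleared. */
static void dlm_shutdown(struct super_state *osb)
{
        if (osb->cconn) {
                printf("disconnecting conn %d\n", osb->cconn->id);
                free(osb->cconn);
                osb->cconn = NULL;
        }
}

int main(void)
{
        struct super_state osb = { .cconn = NULL };

        dlm_shutdown(&osb);              /* never connected: nothing to free */

        osb.cconn = malloc(sizeof(*osb.cconn));
        if (!osb.cconn)
                return 1;
        osb.cconn->id = 1;
        dlm_shutdown(&osb);              /* releases the connection once */
        dlm_shutdown(&osb);              /* second call is a harmless no-op */
        return 0;
}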
fs/ocfs2/super.c

@@ -1914,8 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
            !ocfs2_is_hard_readonly(osb))
                hangup_needed = 1;

-       if (osb->cconn)
-               ocfs2_dlm_shutdown(osb, hangup_needed);
+       ocfs2_dlm_shutdown(osb, hangup_needed);

        ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
        debugfs_remove_recursive(osb->osb_debug_root);
fs/squashfs/file.c

@@ -593,7 +593,7 @@ static void squashfs_readahead(struct readahead_control *ractl)

                res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

-               kfree(actor);
+               squashfs_page_actor_free(actor);

                if (res == expected) {
                        int bytes;
fs/squashfs/file_direct.c

@@ -74,7 +74,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
        /* Decompress directly into the page cache buffers */
        res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

-       kfree(actor);
+       squashfs_page_actor_free(actor);

        if (res < 0)
                goto mark_errored;
fs/squashfs/page_actor.c

@@ -52,6 +52,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
        actor->buffer = buffer;
        actor->pages = pages;
        actor->next_page = 0;
+       actor->tmp_buffer = NULL;
        actor->squashfs_first_page = cache_first_page;
        actor->squashfs_next_page = cache_next_page;
        actor->squashfs_finish_page = cache_finish_page;
@@ -68,20 +69,9 @@ static void *handle_next_page(struct squashfs_page_actor *actor)

        if ((actor->next_page == actor->pages) ||
            (actor->next_index != actor->page[actor->next_page]->index)) {
-               if (actor->alloc_buffer) {
-                       void *tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
-                       if (tmp_buffer) {
-                               actor->tmp_buffer = tmp_buffer;
-                               actor->next_index++;
-                               actor->returned_pages++;
-                               return tmp_buffer;
-                       }
-               }
-
                actor->next_index++;
                actor->returned_pages++;
-               return ERR_PTR(-ENOMEM);
+               return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
        }

        actor->next_index++;
@@ -96,11 +86,10 @@ static void *direct_first_page(struct squashfs_page_actor *actor)

 static void *direct_next_page(struct squashfs_page_actor *actor)
 {
-       if (actor->pageaddr)
+       if (actor->pageaddr) {
                kunmap_local(actor->pageaddr);
-
-       kfree(actor->tmp_buffer);
-       actor->pageaddr = actor->tmp_buffer = NULL;
+               actor->pageaddr = NULL;
+       }

        return handle_next_page(actor);
 }
@@ -109,8 +98,6 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
 {
        if (actor->pageaddr)
                kunmap_local(actor->pageaddr);
-
-       kfree(actor->tmp_buffer);
 }

 struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
@@ -121,6 +108,16 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
        if (actor == NULL)
                return NULL;

+       if (msblk->decompressor->alloc_buffer) {
+               actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+               if (actor->tmp_buffer == NULL) {
+                       kfree(actor);
+                       return NULL;
+               }
+       } else
+               actor->tmp_buffer = NULL;
+
        actor->length = length ? : pages * PAGE_SIZE;
        actor->page = page;
        actor->pages = pages;
@@ -128,7 +125,6 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
        actor->returned_pages = 0;
        actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
        actor->pageaddr = NULL;
-       actor->tmp_buffer = NULL;
        actor->alloc_buffer = msblk->decompressor->alloc_buffer;
        actor->squashfs_first_page = direct_first_page;
        actor->squashfs_next_page = direct_next_page;
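With this change the page actor allocates its fallback scratch buffer once in squashfs_page_actor_init_special() and merely hands it out in handle_next_page(), so the per-page path used by the decompressors never calls kmalloc(). A rough userspace sketch of moving the allocation from the hot path into the constructor (names are illustrative, not the squashfs API):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct page_actor {
        void **pages;        /* destination buffers, may contain NULL holes */
        int npages;
        int next;
        void *tmp_buffer;    /* preallocated scratch for the NULL holes */
};

/* Constructor: do the one allocation that the old code did lazily. */
static struct page_actor *actor_init(void **pages, int npages)
{
        struct page_actor *a = calloc(1, sizeof(*a));

        if (!a)
                return NULL;
        a->tmp_buffer = malloc(PAGE_SIZE);
        if (!a->tmp_buffer) {
                free(a);
                return NULL;
        }
        a->pages = pages;
        a->npages = npages;
        return a;
}

/* Hot path: no allocation, just hand back the target page or the scratch buffer. */
static void *actor_next_page(struct page_actor *a)
{
        void *dst;

        if (a->next >= a->npages)
                return NULL;
        dst = a->pages[a->next] ? a->pages[a->next] : a->tmp_buffer;
        a->next++;
        return dst;
}

static void actor_free(struct page_actor *a)
{
        free(a->tmp_buffer);
        free(a);
}

int main(void)
{
        char p0[PAGE_SIZE], p2[PAGE_SIZE];
        void *pages[3] = { p0, NULL, p2 };   /* middle page missing from cache */
        struct page_actor *a = actor_init(pages, 3);
        void *dst;

        while (a && (dst = actor_next_page(a)) != NULL)
                printf("writing %d bytes to %p\n", PAGE_SIZE, dst);
        if (a)
                actor_free(a);
        return 0;
}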
fs/squashfs/page_actor.h

@@ -29,6 +29,11 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
 extern struct squashfs_page_actor *squashfs_page_actor_init_special(
                                struct squashfs_sb_info *msblk,
                                struct page **page, int pages, int length);
+static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
+{
+       kfree(actor->tmp_buffer);
+       kfree(actor);
+}
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {
        return actor->squashfs_first_page(actor);
include/asm-generic/sections.h

@@ -97,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
 /**
  * memory_intersects - checks if the region occupied by an object intersects
  *                     with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
  * @end: virtual address of the end of the memory region
  * @virt: virtual address of the memory object
  * @size: size of the memory object
@@ -110,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
 {
        void *vend = virt + size;

-       return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+       if (virt < end && vend > begin)
+               return true;
+
+       return false;
 }

 /**
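The rewritten check treats both ranges as half-open intervals and uses the standard overlap condition virt < end && vend > begin. The old expression only asked whether one of the object's two endpoints fell inside the region, so an object that fully contained the region was reported as non-intersecting, while an object merely touching the region's start was reported as intersecting. A small standalone comparison of both forms (an illustrative test program, not the kernel header):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Old check: only looks at the object's endpoints, so full containment of
 * the region is missed and touching-at-the-boundary is falsely reported. */
static bool intersects_old(char *begin, char *end, char *virt, size_t size)
{
        char *vend = virt + size;

        return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}

/* New check: classic half-open interval overlap test. */
static bool intersects_new(char *begin, char *end, char *virt, size_t size)
{
        char *vend = virt + size;

        return virt < end && vend > begin;
}

int main(void)
{
        static char mem[100];
        char *begin = mem + 40, *end = mem + 60;

        /* Object [20, 80) fully contains region [40, 60): real overlap. */
        printf("containment   old=%d new=%d\n",
               intersects_old(begin, end, mem + 20, 60),
               intersects_new(begin, end, mem + 20, 60));   /* old=0, new=1 */

        /* Object [20, 40) only touches the region's start: no overlap. */
        printf("adjacent      old=%d new=%d\n",
               intersects_old(begin, end, mem + 20, 20),
               intersects_new(begin, end, mem + 20, 20));   /* old=1, new=0 */
        return 0;
}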
include/linux/memcontrol.h

@@ -987,19 +987,30 @@ static inline void mod_memcg_page_state(struct page *page,

 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
-       return READ_ONCE(memcg->vmstats.state[idx]);
+       long x = READ_ONCE(memcg->vmstats.state[idx]);
+#ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
 }

 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
 {
        struct mem_cgroup_per_node *pn;
+       long x;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       return READ_ONCE(pn->lruvec_stats.state[idx]);
+       x = READ_ONCE(pn->lruvec_stats.state[idx]);
+#ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
 }

 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
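This hunk reverts the "racy sum avoidance" cleanup: the memcg and lruvec counters are folded together from per-CPU deltas, so a reader on SMP can observe a transiently negative total, and the value is clamped back to zero before being returned as an unsigned long. A toy illustration of why the clamp exists (plain userspace C, not kernel code):

#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU deltas are flushed into a global total lazily. A reader that sums
 * them while an uncharge is visible on one CPU but the matching charge is
 * still sitting unflushed on another can see a negative total. */
static long percpu_delta[NR_CPUS] = { +3, -5, 0, +1 };

static unsigned long page_state_read(void)
{
        long x = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                x += percpu_delta[cpu];

        /* Without the clamp, x = -1 would be reported to the stats consumer
         * as a huge unsigned value (ULONG_MAX). */
        if (x < 0)
                x = 0;
        return (unsigned long)x;
}

int main(void)
{
        printf("reported state: %lu\n", page_state_read());
        return 0;
}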
include/linux/mm.h

@@ -1544,9 +1544,16 @@ static inline bool is_longterm_pinnable_page(struct page *page)
        if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
                return false;
 #endif
-       return !(is_device_coherent_page(page) ||
-                is_zone_movable_page(page) ||
-                is_zero_pfn(page_to_pfn(page)));
+       /* The zero page may always be pinned */
+       if (is_zero_pfn(page_to_pfn(page)))
+               return true;
+
+       /* Coherent device memory must always allow eviction. */
+       if (is_device_coherent_page(page))
+               return false;
+
+       /* Otherwise, non-movable zone pages can be pinned. */
+       return !is_zone_movable_page(page);
 }
 #else
 static inline bool is_longterm_pinnable_page(struct page *page)
kernel/crash_core.c

@@ -494,6 +494,7 @@ static int __init crash_save_vmcoreinfo_init(void)

 #ifdef CONFIG_KALLSYMS
        VMCOREINFO_SYMBOL(kallsyms_names);
+       VMCOREINFO_SYMBOL(kallsyms_num_syms);
        VMCOREINFO_SYMBOL(kallsyms_token_table);
        VMCOREINFO_SYMBOL(kallsyms_token_index);
 #ifdef CONFIG_KALLSYMS_BASE_RELATIVE
mm/backing-dev.c

@@ -260,10 +260,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);
 }

 static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -334,12 +334,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 static void wb_shutdown(struct bdi_writeback *wb)
 {
        /* Make sure nobody queues further work */
-       spin_lock_bh(&wb->work_lock);
+       spin_lock_irq(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
-               spin_unlock_bh(&wb->work_lock);
+               spin_unlock_irq(&wb->work_lock);
                return;
        }
-       spin_unlock_bh(&wb->work_lock);
+       spin_unlock_irq(&wb->work_lock);

        cgwb_remove_from_bdi_list(wb);
        /*
mm/bootmem_info.c

@@ -12,6 +12,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem_info.h>
 #include <linux/memory_hotplug.h>
+#include <linux/kmemleak.h>

 void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
 {
@@ -33,6 +34,7 @@ void put_page_bootmem(struct page *page)
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
+               kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
                free_reserved_page(page);
        }
 }
mm/damon/dbgfs.c

@@ -818,6 +818,9 @@ static int dbgfs_mk_context(char *name)
                return -ENOENT;

        new_dir = debugfs_create_dir(name, root);
+       /* Below check is required for a potential duplicated name case */
+       if (IS_ERR(new_dir))
+               return PTR_ERR(new_dir);
        dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

        new_ctx = dbgfs_new_ctx();
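debugfs_create_dir() hands back an ERR_PTR-encoded pointer when it fails (for example when a directory of the same name already exists), so the result has to be checked with IS_ERR() before it is stored and used. A compact userspace model of the ERR_PTR/IS_ERR convention this check relies on (the kernel's real macros live in include/linux/err.h; everything below is an illustrative re-implementation):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Error codes are folded into the top of the address space, which no valid
 * pointer can occupy, so one return value can carry a pointer or a -errno. */
static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline bool IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical constructor that fails on a duplicate name. */
static void *create_dir(const char *name, bool duplicate)
{
        static int dummy;

        (void)name;
        return duplicate ? ERR_PTR(-EEXIST) : (void *)&dummy;
}

int main(void)
{
        void *dir = create_dir("ctx", true);

        if (IS_ERR(dir)) {
                printf("create_dir failed: %ld\n", PTR_ERR(dir));
                return 1;
        }
        return 0;
}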
mm/hugetlb.c

@@ -6041,7 +6041,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
                goto out_release_unlock;

-       if (vm_shared) {
+       if (page_in_pagecache) {
                page_dup_file_rmap(page, true);
        } else {
                ClearHPageRestoreReserve(page);
mm/mprotect.c

@@ -196,10 +196,11 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
                        pages++;
                } else if (is_swap_pte(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
-                       struct page *page = pfn_swap_entry_to_page(entry);
                        pte_t newpte;

                        if (is_writable_migration_entry(entry)) {
+                               struct page *page = pfn_swap_entry_to_page(entry);
+
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
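The swap-PTE branch previously converted every non-present entry to a page pointer up front, even for entry types whose pfn does not refer to a struct page; the fix defers pfn_swap_entry_to_page() until the entry is known to be a migration entry. A sketch of the underlying rule, treat the entry as a tagged value and decode the payload only in the branch whose tag actually carries it (illustrative types, not the kernel's swp_entry_t):

#include <stdio.h>

enum entry_type { SWAP_ENTRY, MIGRATION_ENTRY };

/* Toy model of a non-present PTE: only MIGRATION_ENTRY has a usable page. */
struct swp_entry {
        enum entry_type type;
        void *page;          /* meaningful only when type == MIGRATION_ENTRY */
};

static void change_entry(const struct swp_entry *entry)
{
        /* Decoding entry->page unconditionally would read a field that
         * carries no meaning for SWAP_ENTRY; decode it inside the branch. */
        if (entry->type == MIGRATION_ENTRY) {
                void *page = entry->page;

                printf("migration entry, page %p\n", page);
        } else {
                printf("swap entry, no page to touch\n");
        }
}

int main(void)
{
        struct swp_entry swap = { .type = SWAP_ENTRY };
        struct swp_entry migr = { .type = MIGRATION_ENTRY, .page = &migr };

        change_entry(&swap);
        change_entry(&migr);
        return 0;
}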
mm/page-writeback.c

@@ -2892,6 +2892,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)

 static void wb_inode_writeback_end(struct bdi_writeback *wb)
 {
+       unsigned long flags;
        atomic_dec(&wb->writeback_inodes);
        /*
         * Make sure estimate of writeback throughput gets updated after
@@ -2900,7 +2901,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
         * that if multiple inodes end writeback at a similar time, they get
         * batched into one bandwidth update.
         */
-       queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+       spin_lock_irqsave(&wb->work_lock, flags);
+       if (test_bit(WB_registered, &wb->state))
+               queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+       spin_unlock_irqrestore(&wb->work_lock, flags);
 }

 bool __folio_end_writeback(struct folio *folio)
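wb_inode_writeback_end() can run after the backing device has been unregistered, so unconditionally queueing bw_dwork would arm a delayed work item on a bdi_writeback that is about to be freed. The fix queues it only while WB_registered is still set, under the same (now irq-safe) work_lock that wb_shutdown() holds when it clears that bit. A userspace sketch of the shutdown-flag-under-a-lock pattern, with a pthread mutex standing in for the spinlock (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct writeback {
        pthread_mutex_t work_lock;
        bool registered;         /* plays the role of WB_registered */
        int queued;              /* plays the role of the armed delayed work */
};

/* Completion path: only arm deferred work while the device is registered. */
static void writeback_end(struct writeback *wb)
{
        pthread_mutex_lock(&wb->work_lock);
        if (wb->registered)
                wb->queued++;
        pthread_mutex_unlock(&wb->work_lock);
}

/* Teardown path: clear the flag under the same lock; afterwards nobody can
 * queue new work against this structure, so it can be freed safely. */
static void shutdown(struct writeback *wb)
{
        pthread_mutex_lock(&wb->work_lock);
        wb->registered = false;
        pthread_mutex_unlock(&wb->work_lock);
        /* ... cancel pending work and free the structure here ... */
}

int main(void)
{
        struct writeback wb = {
                .work_lock = PTHREAD_MUTEX_INITIALIZER,
                .registered = true,
                .queued = 0,
        };

        writeback_end(&wb);      /* queued while still registered */
        shutdown(&wb);
        writeback_end(&wb);      /* after shutdown: nothing is queued */
        printf("work queued %d time(s)\n", wb.queued);
        return 0;
}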
mm/shmem.c

@@ -1782,6 +1782,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,

        if (shmem_should_replace_folio(folio, gfp)) {
                error = shmem_replace_page(&page, gfp, info, index);
+               folio = page_folio(page);
                if (error)
                        goto failed;
        }
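shmem_replace_page() swaps a new page into place through the caller's pointer, so the local folio derived from the old page becomes stale; the fix re-derives it with page_folio() right after the call. A tiny illustration of re-deriving a dependent pointer after a replace-style helper (hypothetical helper, not the shmem API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Replace-style helper: frees *bufp and hands back a fresh allocation.
 * Any alias the caller computed from the old buffer is stale afterwards. */
static int replace_buffer(char **bufp, size_t len)
{
        char *fresh = calloc(1, len);

        if (!fresh)
                return -1;
        free(*bufp);
        *bufp = fresh;
        return 0;
}

int main(void)
{
        char *buf = malloc(16);
        char *cursor = buf;              /* alias derived from the old buffer */

        if (!buf)
                return 1;
        if (replace_buffer(&buf, 16) == 0)
                cursor = buf;            /* refresh the alias, as the shmem fix does */
        strcpy(cursor, "ok");
        printf("%s\n", buf);
        free(buf);
        return 0;
}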
mm/zsmalloc.c

@@ -1487,7 +1487,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
        struct size_class *class;
        enum fullness_group fullness;

-       if (unlikely(!handle))
+       if (IS_ERR_OR_NULL((void *)handle))
                return;

        /*
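zs_free() can be handed a handle that came from a failed allocation path, where the value is an encoded error rather than 0, so the guard is widened from !handle to IS_ERR_OR_NULL((void *)handle). A self-contained sketch of why a destructor may need to tolerate error-encoded handles (a userspace re-implementation of the err.h helper, for illustration only):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline bool IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Allocator in the zsmalloc style: the handle is an unsigned long that holds
 * either a real object or an encoded -errno when allocation fails. */
static unsigned long toy_malloc(size_t size, bool simulate_failure)
{
        if (simulate_failure)
                return (unsigned long)-ENOSPC;   /* error encoded in the handle */
        return (unsigned long)malloc(size);
}

static void toy_free(unsigned long handle)
{
        /* Guarding only against 0 would let an error-valued handle through
         * and crash; error values and NULL both mean "nothing to free". */
        if (IS_ERR_OR_NULL((void *)handle))
                return;
        free((void *)handle);
}

int main(void)
{
        unsigned long ok = toy_malloc(32, false);
        unsigned long bad = toy_malloc(32, true);

        toy_free(bad);   /* error handle: safely ignored */
        toy_free(ok);    /* real handle: released */
        return 0;
}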