Six hotfixes. Five are cc:stable: four for MM, one for nilfs2. Also a
MAINTAINERS update.

-----BEGIN PGP SIGNATURE-----
iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCY/AK0AAKCRDdBJ7gKXxA
jg4SAQCw/Udkt+UgtFzQ+oXg8FAw3ivrniGnOwaMfDDbiVz3KgD+Mkvnw6nb7PMT
G9iFA5ZRBISCv0ahXxnNrxbtmcFcewQ=
=fFg9
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2023-02-17-15-16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Six hotfixes. Five are cc:stable: four for MM, one for nilfs2. Also a
  MAINTAINERS update"

* tag 'mm-hotfixes-stable-2023-02-17-15-16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: fix underflow in second superblock position calculations
  hugetlb: check for undefined shift on 32 bit architectures
  mm/migrate: fix wrongly apply write bit after mkdirty on sparc64
  MAINTAINERS: update FPU EMULATOR web page
  mm/MADV_COLLAPSE: set EAGAIN on unexpected page refcount
  mm/filemap: fix page end in filemap_get_read_batch
This commit is contained in:
commit
38f8ccde04
|
@ -8202,7 +8202,7 @@ F: drivers/fpga/microchip-spi.c
|
|||
FPU EMULATOR
|
||||
M: Bill Metzenthen <billm@melbpc.org.au>
|
||||
S: Maintained
|
||||
W: http://floatingpoint.sourceforge.net/emulator/index.html
|
||||
W: https://floatingpoint.billm.au/
|
||||
F: arch/x86/math-emu/
|
||||
|
||||
FRAMEBUFFER CORE
|
||||
|
|
|
@ -1114,7 +1114,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
|
|||
|
||||
minseg = range[0] + segbytes - 1;
|
||||
do_div(minseg, segbytes);
|
||||
|
||||
if (range[1] < 4096)
|
||||
goto out;
|
||||
|
||||
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
|
||||
if (maxseg < segbytes)
|
||||
goto out;
|
||||
|
||||
do_div(maxseg, segbytes);
|
||||
maxseg--;
|
||||
|
||||
|
|
|
@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
|
|||
if (newsize > devsize)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Prevent underflow in second superblock position calculation.
|
||||
* The exact minimum size check is done in nilfs_sufile_resize().
|
||||
*/
|
||||
if (newsize < 4096) {
|
||||
ret = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write lock is required to protect some functions depending
|
||||
* on the number of segments, the number of reserved segments,
|
||||
|
|
|
@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
|
|||
{
|
||||
struct nilfs_super_block **sbp = nilfs->ns_sbp;
|
||||
struct buffer_head **sbh = nilfs->ns_sbh;
|
||||
u64 sb2off = NILFS_SB2_OFFSET_BYTES(bdev_nr_bytes(nilfs->ns_bdev));
|
||||
u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
|
||||
int valid[2], swp = 0;
|
||||
|
||||
if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
|
||||
nilfs_err(sb, "device size too small");
|
||||
return -EINVAL;
|
||||
}
|
||||
sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
|
||||
|
||||
sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
|
||||
&sbh[0]);
|
||||
sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
|
||||
|
|
|
@ -743,7 +743,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
|
|||
if (!page_size_log)
|
||||
return &default_hstate;
|
||||
|
||||
return size_to_hstate(1UL << page_size_log);
|
||||
if (page_size_log < BITS_PER_LONG)
|
||||
return size_to_hstate(1UL << page_size_log);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
|
||||
|
|
|
@ -2588,18 +2588,19 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
|
|||
struct folio *folio;
|
||||
int err = 0;
|
||||
|
||||
/* "last_index" is the index of the page beyond the end of the read */
|
||||
last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
|
||||
retry:
|
||||
if (fatal_signal_pending(current))
|
||||
return -EINTR;
|
||||
|
||||
filemap_get_read_batch(mapping, index, last_index, fbatch);
|
||||
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
|
||||
if (!folio_batch_count(fbatch)) {
|
||||
if (iocb->ki_flags & IOCB_NOIO)
|
||||
return -EAGAIN;
|
||||
page_cache_sync_readahead(mapping, ra, filp, index,
|
||||
last_index - index);
|
||||
filemap_get_read_batch(mapping, index, last_index, fbatch);
|
||||
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
|
||||
}
|
||||
if (!folio_batch_count(fbatch)) {
|
||||
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
|
||||
|
|
|
@ -3272,8 +3272,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
|
|||
pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
|
||||
if (pmd_swp_soft_dirty(*pvmw->pmd))
|
||||
pmde = pmd_mksoft_dirty(pmde);
|
||||
if (is_writable_migration_entry(entry))
|
||||
pmde = maybe_pmd_mkwrite(pmde, vma);
|
||||
if (pmd_swp_uffd_wp(*pvmw->pmd))
|
||||
pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
|
||||
if (!is_migration_entry_young(entry))
|
||||
|
@ -3281,6 +3279,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
|
|||
/* NOTE: this may contain setting soft-dirty on some archs */
|
||||
if (PageDirty(new) && is_migration_entry_dirty(entry))
|
||||
pmde = pmd_mkdirty(pmde);
|
||||
if (is_writable_migration_entry(entry))
|
||||
pmde = maybe_pmd_mkwrite(pmde, vma);
|
||||
else
|
||||
pmde = pmd_wrprotect(pmde);
|
||||
|
||||
if (PageAnon(new)) {
|
||||
rmap_t rmap_flags = RMAP_COMPOUND;
|
||||
|
|
|
@ -2611,6 +2611,7 @@ static int madvise_collapse_errno(enum scan_result r)
|
|||
case SCAN_CGROUP_CHARGE_FAIL:
|
||||
return -EBUSY;
|
||||
/* Resource temporary unavailable - trying again might succeed */
|
||||
case SCAN_PAGE_COUNT:
|
||||
case SCAN_PAGE_LOCK:
|
||||
case SCAN_PAGE_LRU:
|
||||
case SCAN_DEL_PAGE_LRU:
|
||||
|
|
|
@ -224,6 +224,8 @@ static bool remove_migration_pte(struct folio *folio,
|
|||
pte = maybe_mkwrite(pte, vma);
|
||||
else if (pte_swp_uffd_wp(*pvmw.pte))
|
||||
pte = pte_mkuffd_wp(pte);
|
||||
else
|
||||
pte = pte_wrprotect(pte);
|
||||
|
||||
if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
|
||||
rmap_flags |= RMAP_EXCLUSIVE;
|
||||
|
|
Loading…
Reference in New Issue