Thirteen fixes, almost all for MM. Seven of these are cc:stable and the
remainder fix up the changes which went into this -rc cycle.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCYwQZcgAKCRDdBJ7gKXxA
jnCxAQCk8L6PPm0L2KvKr5Vu3M/T0o9SvfxfM5yho80zM68fHQD/eLxz+nd3m+N5
K7Mdbcb2u6F46qQaS+S5RialEWKpsw8=
=WtBo
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2022-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Thirteen fixes, almost all for MM. Seven of these are cc:stable and
  the remainder fix up the changes which went into this -rc cycle"

* tag 'mm-hotfixes-stable-2022-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  kprobes: don't call disarm_kprobe() for disabled kprobes
  mm/shmem: shmem_replace_page() remember NR_SHMEM
  mm/shmem: tmpfs fallocate use file_modified()
  mm/shmem: fix chattr fsflags support in tmpfs
  mm/hugetlb: support write-faults in shared mappings
  mm/hugetlb: fix hugetlb not supporting softdirty tracking
  mm/uffd: reset write protection when unregister with wp-mode
  mm/smaps: don't access young/dirty bit if pte unpresent
  mm: add DEVICE_ZONE to FOR_ALL_ZONES
  kernel/sys_ni: add compat entry for fadvise64_64
  mm/gup: fix FOLL_FORCE COW security issue and remove FOLL_COW
  Revert "zram: remove double compression logic"
  get_maintainer: add Alan to .get_maintainer.ignore
commit 95607ad99b

.get_maintainer.ignore

@@ -1,2 +1,4 @@
+Alan Cox <alan@lxorguk.ukuu.org.uk>
+Alan Cox <root@hraefn.swansea.linux.org.uk>
 Christoph Hellwig <hch@lst.de>
 Marc Gonzalez <marc.w.gonzalez@free.fr>
drivers/block/zram/zram_drv.c

@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
 static ssize_t debug_stat_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        int version = 2;
+        int version = 1;
         struct zram *zram = dev_to_zram(dev);
         ssize_t ret;
 
         down_read(&zram->init_lock);
         ret = scnprintf(buf, PAGE_SIZE,
-                        "version: %d\n%8llu\n",
+                        "version: %d\n%8llu %8llu\n",
                         version,
+                        (u64)atomic64_read(&zram->stats.writestall),
                         (u64)atomic64_read(&zram->stats.miss_free));
         up_read(&zram->init_lock);
 
@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 {
         int ret = 0;
         unsigned long alloced_pages;
-        unsigned long handle = 0;
+        unsigned long handle = -ENOMEM;
         unsigned int comp_len = 0;
         void *src, *dst, *mem;
         struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
         }
         kunmap_atomic(mem);
 
+compress_again:
         zstrm = zcomp_stream_get(zram->comp);
         src = kmap_atomic(page);
         ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
         if (unlikely(ret)) {
                 zcomp_stream_put(zram->comp);
                 pr_err("Compression failed! err=%d\n", ret);
+                zs_free(zram->mem_pool, handle);
                 return ret;
         }
 
         if (comp_len >= huge_class_size)
                 comp_len = PAGE_SIZE;
-
-        handle = zs_malloc(zram->mem_pool, comp_len,
-                        __GFP_KSWAPD_RECLAIM |
-                        __GFP_NOWARN |
-                        __GFP_HIGHMEM |
-                        __GFP_MOVABLE);
-
+        /*
+         * handle allocation has 2 paths:
+         * a) fast path is executed with preemption disabled (for
+         *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+         *  since we can't sleep;
+         * b) slow path enables preemption and attempts to allocate
+         *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
+         *  put per-cpu compression stream and, thus, to re-do
+         *  the compression once handle is allocated.
+         *
+         * if we have a 'non-null' handle here then we are coming
+         * from the slow path and handle has already been allocated.
+         */
+        if (IS_ERR((void *)handle))
+                handle = zs_malloc(zram->mem_pool, comp_len,
+                                __GFP_KSWAPD_RECLAIM |
+                                __GFP_NOWARN |
+                                __GFP_HIGHMEM |
+                                __GFP_MOVABLE);
         if (IS_ERR((void *)handle)) {
                 zcomp_stream_put(zram->comp);
+                atomic64_inc(&zram->stats.writestall);
+                handle = zs_malloc(zram->mem_pool, comp_len,
+                                GFP_NOIO | __GFP_HIGHMEM |
+                                __GFP_MOVABLE);
+                if (!IS_ERR((void *)handle))
+                        goto compress_again;
                 return PTR_ERR((void *)handle);
         }
 
@@ -1948,6 +1969,7 @@ static int zram_add(void)
         if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                 blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
+        blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
         ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
         if (ret)
                 goto out_cleanup_disk;
drivers/block/zram/zram_drv.h

@@ -81,6 +81,7 @@ struct zram_stats {
         atomic64_t huge_pages_since;    /* no. of huge pages since zram set up */
         atomic64_t pages_stored;        /* no. of pages currently stored */
         atomic_long_t max_used_pages;   /* no. of maximum pages stored */
+        atomic64_t writestall;          /* no. of write slow paths */
         atomic64_t miss_free;           /* no. of missed free */
 #ifdef CONFIG_ZRAM_WRITEBACK
         atomic64_t bd_count;            /* no. of pages in backing device */
fs/proc/task_mmu.c

@@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
         struct vm_area_struct *vma = walk->vma;
         bool locked = !!(vma->vm_flags & VM_LOCKED);
         struct page *page = NULL;
-        bool migration = false;
+        bool migration = false, young = false, dirty = false;
 
         if (pte_present(*pte)) {
                 page = vm_normal_page(vma, addr, *pte);
+                young = pte_young(*pte);
+                dirty = pte_dirty(*pte);
         } else if (is_swap_pte(*pte)) {
                 swp_entry_t swpent = pte_to_swp_entry(*pte);
 
@@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
         if (!page)
                 return;
 
-        smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
-                      locked, migration);
+        smaps_account(mss, page, false, young, dirty, locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
fs/userfaultfd.c

@@ -1601,6 +1601,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                         wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
                 }
 
+                /* Reset ptes for the whole vma range if wr-protected */
+                if (userfaultfd_wp(vma))
+                        uffd_wp_range(mm, vma, start, vma_end - start, false);
+
                 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
                 prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                  vma->anon_vma, vma->vm_file, vma->vm_pgoff,
include/linux/mm.h

@@ -2885,7 +2885,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
 #define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
 #define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */
-#define FOLL_COW        0x4000  /* internal GUP flag */
 #define FOLL_ANON       0x8000  /* don't do file mappings */
 #define FOLL_LONGTERM   0x10000 /* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD  0x20000 /* split huge pmd before returning */
include/linux/shmem_fs.h

@@ -29,15 +29,10 @@ struct shmem_inode_info {
         struct inode            vfs_inode;
 };
 
-#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
-#define SHMEM_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE
-#define SHMEM_FL_INHERITED FS_FL_USER_MODIFIABLE
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define SHMEM_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define SHMEM_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_USER_VISIBLE           FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+        (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED              (FS_NODUMP_FL | FS_NOATIME_FL)
 
 struct shmem_sb_info {
         unsigned long max_blocks;   /* How many blocks are allowed */
include/linux/userfaultfd_k.h

@@ -73,6 +73,8 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
 extern int mwriteprotect_range(struct mm_struct *dst_mm,
                                unsigned long start, unsigned long len,
                                bool enable_wp, atomic_t *mmap_changing);
+extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+                          unsigned long start, unsigned long len, bool enable_wp);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
include/linux/vm_event_item.h

@@ -20,12 +20,19 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+        HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
-                FOR_ALL_ZONES(PGALLOC),
-                FOR_ALL_ZONES(ALLOCSTALL),
-                FOR_ALL_ZONES(PGSCAN_SKIP),
+                FOR_ALL_ZONES(PGALLOC)
+                FOR_ALL_ZONES(ALLOCSTALL)
+                FOR_ALL_ZONES(PGSCAN_SKIP)
                 PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
                 PGFAULT, PGMAJFAULT,
                 PGLAZYFREED,
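An aside for readers, not part of the patch: a minimal, self-contained sketch of how the reworked FOR_ALL_ZONES() X-macro is expected to expand at a use site such as FOR_ALL_ZONES(PGALLOC). The per-zone helper macros below are simplified stand-ins (the real ones live in the kernel headers and depend on the config), and the sketch assumes ZONE_DMA, ZONE_DMA32, ZONE_HIGHMEM and ZONE_DEVICE are all enabled.

/* Sketch only: simplified stand-ins for the kernel's per-zone macros. */
#define DMA_ZONE(xx)     xx##_DMA,
#define DMA32_ZONE(xx)   xx##_DMA32,
#define HIGHMEM_ZONE(xx) xx##_HIGH,
#define DEVICE_ZONE(xx)  xx##_DEVICE,

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
        HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)

/*
 * FOR_ALL_ZONES(PGALLOC) now expands to:
 *   PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH,
 *   PGALLOC_MOVABLE, PGALLOC_DEVICE,
 * ending in a comma, which is why the enum use sites in the hunk above
 * drop the trailing comma they used to add themselves.
 */
enum vm_event_item_sketch {
        FOR_ALL_ZONES(PGALLOC)
        NR_SKETCH_ITEMS
};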
kernel/kprobes.c

@@ -1707,11 +1707,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
                 /* Try to disarm and disable this/parent probe */
                 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
                         /*
-                         * If 'kprobes_all_disarmed' is set, 'orig_p'
-                         * should have already been disarmed, so
-                         * skip unneed disarming process.
+                         * Don't be lazy here. Even if 'kprobes_all_disarmed'
+                         * is false, 'orig_p' might not have been armed yet.
+                         * Note arm_all_kprobes() __tries__ to arm all kprobes
+                         * on the best effort basis.
                          */
-                        if (!kprobes_all_disarmed) {
+                        if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
                                 ret = disarm_kprobe(orig_p, true);
                                 if (ret) {
                                         p->flags &= ~KPROBE_FLAG_DISABLED;
kernel/sys_ni.c

@@ -277,6 +277,7 @@ COND_SYSCALL(landlock_restrict_self);
 
 /* mm/fadvise.c */
 COND_SYSCALL(fadvise64_64);
+COND_SYSCALL_COMPAT(fadvise64_64);
 
 /* mm/, CONFIG_MMU only */
 COND_SYSCALL(swapon);
mm/gup.c (68 changed lines)

@@ -478,14 +478,42 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
         return -EEXIST;
 }
 
-/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+                                        struct vm_area_struct *vma,
+                                        unsigned int flags)
 {
-        return pte_write(pte) ||
-                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+        /* If the pte is writable, we can write to the page. */
+        if (pte_write(pte))
+                return true;
+
+        /* Maybe FOLL_FORCE is set to override it? */
+        if (!(flags & FOLL_FORCE))
+                return false;
+
+        /* But FOLL_FORCE has no effect on shared mappings */
+        if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+                return false;
+
+        /* ... or read-only private ones */
+        if (!(vma->vm_flags & VM_MAYWRITE))
+                return false;
+
+        /* ... or already writable ones that just need to take a write fault */
+        if (vma->vm_flags & VM_WRITE)
+                return false;
+
+        /*
+         * See can_change_pte_writable(): we broke COW and could map the page
+         * writable if we have an exclusive anonymous page ...
+         */
+        if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+                return false;
+
+        /* ... and a write-fault isn't required for other reasons. */
+        if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
+                return false;
+        return !userfaultfd_pte_wp(vma, pte);
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -528,12 +556,19 @@ retry:
         }
         if ((flags & FOLL_NUMA) && pte_protnone(pte))
                 goto no_page;
-        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
-                pte_unmap_unlock(ptep, ptl);
-                return NULL;
-        }
 
         page = vm_normal_page(vma, address, pte);
+
+        /*
+         * We only care about anon pages in can_follow_write_pte() and don't
+         * have to worry about pte_devmap() because they are never anon.
+         */
+        if ((flags & FOLL_WRITE) &&
+            !can_follow_write_pte(pte, page, vma, flags)) {
+                page = NULL;
+                goto out;
+        }
+
         if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                 /*
                  * Only return device mapping pages in the FOLL_GET or FOLL_PIN
@@ -986,17 +1021,6 @@ static int faultin_page(struct vm_area_struct *vma,
                 return -EBUSY;
         }
 
-        /*
-         * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
-         * necessary, even if maybe_mkwrite decided not to set pte_write. We
-         * can thus safely do subsequent page lookups as if they were reads.
-         * But only do so when looping for pte_write is futile: in some cases
-         * userspace may also be wanting to write to the gotten user page,
-         * which a read fault here might prevent (a readonly page might get
-         * reCOWed by userspace write).
-         */
-        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-                *flags |= FOLL_COW;
         return 0;
 }
 

mm/huge_memory.c

@@ -1040,12 +1040,6 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
         assert_spin_locked(pmd_lockptr(mm, pmd));
 
-        /*
-         * When we COW a devmap PMD entry, we split it into PTEs, so we should
-         * not be in this function with `flags & FOLL_COW` set.
-         */
-        WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
-
         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
         if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                          (FOLL_PIN | FOLL_GET)))
@@ -1395,14 +1389,42 @@ fallback:
         return VM_FAULT_FALLBACK;
 }
 
-/*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+                                        struct vm_area_struct *vma,
+                                        unsigned int flags)
 {
-        return pmd_write(pmd) ||
-                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+        /* If the pmd is writable, we can write to the page. */
+        if (pmd_write(pmd))
+                return true;
+
+        /* Maybe FOLL_FORCE is set to override it? */
+        if (!(flags & FOLL_FORCE))
+                return false;
+
+        /* But FOLL_FORCE has no effect on shared mappings */
+        if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+                return false;
+
+        /* ... or read-only private ones */
+        if (!(vma->vm_flags & VM_MAYWRITE))
+                return false;
+
+        /* ... or already writable ones that just need to take a write fault */
+        if (vma->vm_flags & VM_WRITE)
+                return false;
+
+        /*
+         * See can_change_pte_writable(): we broke COW and could map the page
+         * writable if we have an exclusive anonymous page ...
+         */
+        if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+                return false;
+
+        /* ... and a write-fault isn't required for other reasons. */
+        if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+                return false;
+        return !userfaultfd_huge_pmd_wp(vma, pmd);
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1411,12 +1433,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                    unsigned int flags)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct page *page = NULL;
+        struct page *page;
 
         assert_spin_locked(pmd_lockptr(mm, pmd));
 
-        if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
-                goto out;
+        page = pmd_page(*pmd);
+        VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+
+        if ((flags & FOLL_WRITE) &&
+            !can_follow_write_pmd(*pmd, page, vma, flags))
+                return NULL;
 
         /* Avoid dumping huge zero page */
         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
@@ -1424,10 +1450,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
         /* Full NUMA hinting faults to serialise migration in fault paths */
         if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
-                goto out;
-
-        page = pmd_page(*pmd);
-        VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+                return NULL;
 
         if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
                 return ERR_PTR(-EMLINK);
@@ -1444,7 +1467,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
         VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 
-out:
         return page;
 }
 
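An aside, not part of the merge: the FOLL_FORCE path reworked in mm/gup.c and mm/huge_memory.c above is what debuggers rely on when poking read-only memory. Below is a small userspace sketch of that behaviour, assuming a typical Linux system where writes through /proc/self/mem take the FOLL_FORCE path. The forced write is expected to succeed by breaking COW on the private anonymous mapping, which is the case the new can_follow_write_pte() still permits (exclusive anonymous page in a private mapping), while FOLL_FORCE writes to, for example, read-only shared mappings are refused.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Read-only, private anonymous mapping: a normal store would fault. */
        char *p = mmap(NULL, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Writes via /proc/self/mem take the FOLL_FORCE path, like ptrace. */
        int fd = open("/proc/self/mem", O_RDWR);
        if (fd < 0)
                return 1;

        const char msg[] = "written via FOLL_FORCE";
        if (pwrite(fd, msg, sizeof(msg), (off_t)(uintptr_t)p) != sizeof(msg))
                perror("pwrite");

        /* The mapping now shows the COW copy created for the forced write. */
        printf("%s\n", p);
        close(fd);
        return 0;
}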
mm/hugetlb.c (26 changed lines)

@@ -5241,6 +5241,21 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
         VM_BUG_ON(unshare && (flags & FOLL_WRITE));
         VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
 
+        /*
+         * hugetlb does not support FOLL_FORCE-style write faults that keep the
+         * PTE mapped R/O such as maybe_mkwrite() would do.
+         */
+        if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
+                return VM_FAULT_SIGSEGV;
+
+        /* Let's take out MAP_SHARED mappings first. */
+        if (vma->vm_flags & VM_MAYSHARE) {
+                if (unlikely(unshare))
+                        return 0;
+                set_huge_ptep_writable(vma, haddr, ptep);
+                return 0;
+        }
+
         pte = huge_ptep_get(ptep);
         old_page = pte_page(pte);
 
@@ -5781,12 +5796,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
          * If we are going to COW/unshare the mapping later, we examine the
          * pending reservations for this page now. This will ensure that any
          * allocations necessary to record that reservation occur outside the
-         * spinlock. For private mappings, we also lookup the pagecache
-         * page now as it is used to determine if a reservation has been
-         * consumed.
+         * spinlock. Also lookup the pagecache page now as it is used to
+         * determine if a reservation has been consumed.
          */
         if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
-            !huge_pte_write(entry)) {
+            !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
                 if (vma_needs_reservation(h, vma, haddr) < 0) {
                         ret = VM_FAULT_OOM;
                         goto out_mutex;
@@ -5794,9 +5808,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 /* Just decrements count, does not deallocate */
                 vma_end_reservation(h, vma, haddr);
 
-                if (!(vma->vm_flags & VM_MAYSHARE))
-                        pagecache_page = hugetlbfs_pagecache_page(h,
-                                                        vma, haddr);
+                pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
         }
 
         ptl = huge_pte_lock(h, mm, ptep);
mm/mmap.c

@@ -1646,8 +1646,11 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
             pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
                 return 0;
 
-        /* Do we need to track softdirty? */
-        if (vma_soft_dirty_enabled(vma))
+        /*
+         * Do we need to track softdirty? hugetlb does not support softdirty
+         * tracking yet.
+         */
+        if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
                 return 1;
 
         /* Specialty mapping? */
mm/shmem.c (59 changed lines)

@@ -1659,7 +1659,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                 new = page_folio(newpage);
                 mem_cgroup_migrate(old, new);
                 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
+                __inc_lruvec_page_state(newpage, NR_SHMEM);
                 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
+                __dec_lruvec_page_state(oldpage, NR_SHMEM);
         }
         xa_unlock_irq(&swap_mapping->i_pages);
 
@@ -2281,16 +2283,34 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
         return 0;
 }
 
-/* Mask out flags that are inappropriate for the given type of inode. */
-static unsigned shmem_mask_flags(umode_t mode, __u32 flags)
+#ifdef CONFIG_TMPFS_XATTR
+static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
+
+/*
+ * chattr's fsflags are unrelated to extended attributes,
+ * but tmpfs has chosen to enable them under the same config option.
+ */
+static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
 {
-        if (S_ISDIR(mode))
-                return flags;
-        else if (S_ISREG(mode))
-                return flags & SHMEM_REG_FLMASK;
-        else
-                return flags & SHMEM_OTHER_FLMASK;
+        unsigned int i_flags = 0;
+
+        if (fsflags & FS_NOATIME_FL)
+                i_flags |= S_NOATIME;
+        if (fsflags & FS_APPEND_FL)
+                i_flags |= S_APPEND;
+        if (fsflags & FS_IMMUTABLE_FL)
+                i_flags |= S_IMMUTABLE;
+        /*
+         * But FS_NODUMP_FL does not require any action in i_flags.
+         */
+        inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
 }
+#else
+static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
+{
+}
+#define shmem_initxattrs NULL
+#endif
 
 static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
                                      umode_t mode, dev_t dev, unsigned long flags)
@@ -2319,7 +2339,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
                 info->i_crtime = inode->i_mtime;
                 info->fsflags = (dir == NULL) ? 0 :
                         SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
-                info->fsflags = shmem_mask_flags(mode, info->fsflags);
+                if (info->fsflags)
+                        shmem_set_inode_flags(inode, info->fsflags);
                 INIT_LIST_HEAD(&info->shrinklist);
                 INIT_LIST_HEAD(&info->swaplist);
                 simple_xattrs_init(&info->xattrs);
@@ -2468,12 +2489,6 @@ out_unacct_blocks:
 static const struct inode_operations shmem_symlink_inode_operations;
 static const struct inode_operations shmem_short_symlink_operations;
 
-#ifdef CONFIG_TMPFS_XATTR
-static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
-#else
-#define shmem_initxattrs NULL
-#endif
-
 static int
 shmem_write_begin(struct file *file, struct address_space *mapping,
                   loff_t pos, unsigned len,
@@ -2826,12 +2841,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                 i_size_write(inode, offset + len);
-        inode->i_ctime = current_time(inode);
 undone:
         spin_lock(&inode->i_lock);
         inode->i_private = NULL;
         spin_unlock(&inode->i_lock);
 out:
+        if (!error)
+                file_modified(file);
         inode_unlock(inode);
         return error;
 }
@@ -3179,18 +3195,13 @@ static int shmem_fileattr_set(struct user_namespace *mnt_userns,
 
         if (fileattr_has_fsx(fa))
                 return -EOPNOTSUPP;
+        if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
+                return -EOPNOTSUPP;
 
         info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
                 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
 
-        inode->i_flags &= ~(S_APPEND | S_IMMUTABLE | S_NOATIME);
-        if (info->fsflags & FS_APPEND_FL)
-                inode->i_flags |= S_APPEND;
-        if (info->fsflags & FS_IMMUTABLE_FL)
-                inode->i_flags |= S_IMMUTABLE;
-        if (info->fsflags & FS_NOATIME_FL)
-                inode->i_flags |= S_NOATIME;
-
+        shmem_set_inode_flags(inode, info->fsflags);
         inode->i_ctime = current_time(inode);
         return 0;
 }
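An aside, not from the patch: a small userspace sketch of the interface the tmpfs fsflags fix above is servicing. It assumes /dev/shm is a tmpfs mount, uses the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls (what chattr(1) drives under the hood), and the demo path is made up; only the bits kept in SHMEM_FL_USER_MODIFIABLE are expected to stick on tmpfs.

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        /* Hypothetical demo path; any file on a tmpfs mount will do. */
        const char *path = argc > 1 ? argv[1] : "/dev/shm/fsflags-demo";
        int attr = 0;

        int fd = open(path, O_RDWR | O_CREAT, 0600);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FS_IOC_GETFLAGS, &attr) < 0) {
                perror("FS_IOC_GETFLAGS");
                return 1;
        }
        attr |= FS_NOATIME_FL | FS_NODUMP_FL;   /* roughly: chattr +A +d */
        if (ioctl(fd, FS_IOC_SETFLAGS, &attr) < 0)
                perror("FS_IOC_SETFLAGS");
        else
                printf("fsflags now 0x%x\n", attr);
        close(fd);
        return 0;
}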
mm/userfaultfd.c

@@ -703,14 +703,29 @@ ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
                               mmap_changing, 0);
 }
 
+void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
+                   unsigned long start, unsigned long len, bool enable_wp)
+{
+        struct mmu_gather tlb;
+        pgprot_t newprot;
+
+        if (enable_wp)
+                newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+        else
+                newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+        tlb_gather_mmu(&tlb, dst_mm);
+        change_protection(&tlb, dst_vma, start, start + len, newprot,
+                          enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+        tlb_finish_mmu(&tlb);
+}
+
 int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                         unsigned long len, bool enable_wp,
                         atomic_t *mmap_changing)
 {
         struct vm_area_struct *dst_vma;
         unsigned long page_mask;
-        struct mmu_gather tlb;
-        pgprot_t newprot;
         int err;
 
         /*
@@ -750,15 +765,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                 goto out_unlock;
         }
 
-        if (enable_wp)
-                newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
-        else
-                newprot = vm_get_page_prot(dst_vma->vm_flags);
-
-        tlb_gather_mmu(&tlb, dst_mm);
-        change_protection(&tlb, dst_vma, start, start + len, newprot,
-                          enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
-        tlb_finish_mmu(&tlb);
+        uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp);
 
         err = 0;
 out_unlock:
mm/vmstat.c

@@ -1168,8 +1168,15 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
+#ifdef CONFIG_ZONE_DEVICE
+#define TEXT_FOR_DEVICE(xx) xx "_device",
+#else
+#define TEXT_FOR_DEVICE(xx)
+#endif
+
 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
-                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",
+                                        TEXT_FOR_HIGHMEM(xx) xx "_movable", \
+                                        TEXT_FOR_DEVICE(xx)
 
 const char * const vmstat_text[] = {
         /* enum zone_stat_item counters */
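For context, not part of the change: with TEXT_FOR_DEVICE() in place, the per-zone counter names emitted into /proc/vmstat gain a *_device variant on CONFIG_ZONE_DEVICE kernels. Below is a stand-alone sketch of the same string-pasting pattern, with the per-zone helpers stubbed in and every zone assumed enabled.

#include <stdio.h>

/* Sketch only: simplified stand-ins for the kernel's TEXT_FOR_* macros. */
#define TEXT_FOR_DMA(xx)     xx "_dma",
#define TEXT_FOR_DMA32(xx)   xx "_dma32",
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#define TEXT_FOR_DEVICE(xx)  xx "_device",

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
        TEXT_FOR_HIGHMEM(xx) xx "_movable", \
        TEXT_FOR_DEVICE(xx)

static const char *const names[] = { TEXTS_FOR_ZONES("pgalloc") };

int main(void)
{
        /* Prints pgalloc_dma ... pgalloc_device, one name per line. */
        for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                printf("%s\n", names[i]);
        return 0;
}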