mm: userfaultfd: don't pass around both mm and vma

Quite a few userfaultfd functions took both mm and vma pointers as
arguments.  Since the mm is trivially accessible via vma->vm_mm, there's
no reason to pass both; it just needlessly extends the already long
argument list.

Get rid of the mm pointer, where possible, to shorten the argument list.
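
The change is mechanical: callers stop passing the mm, and any function
that still needs it derives it on entry.  A distilled sketch of the
pattern, using mfill_atomic_install_pte from the hunks below (the other
conversions have the same shape):

	/* Before: callers passed both pointers, which had to agree. */
	int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr, struct page *page,
				     bool newly_allocated, bool wp_copy);

	/* After: the vma alone is enough; the mm is recovered locally. */
	int mfill_atomic_install_pte(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr, struct page *page,
				     bool newly_allocated, bool wp_copy)
	{
		struct mm_struct *dst_mm = dst_vma->vm_mm;
		/* ... body unchanged, still uses dst_mm ... */
	}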

Link: https://lkml.kernel.org/r/20230314221250.682452-3-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nadav Amit <namit@vmware.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 61c5004022 (parent a734991cca)
Axel Rasmussen, 2023-03-14 15:12:48 -07:00; committed by Andrew Morton
7 files changed, 41 insertions(+), 46 deletions(-)

--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1644,7 +1644,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		/* Reset ptes for the whole vma range if wr-protected */
 		if (userfaultfd_wp(vma))
-			uffd_wp_range(mm, vma, start, vma_end - start, false);
+			uffd_wp_range(vma, start, vma_end - start, false);
 		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
 		prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,

--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -158,7 +158,7 @@ unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
 #ifdef CONFIG_USERFAULTFD
-int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
@@ -393,8 +393,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 }
 #ifdef CONFIG_USERFAULTFD
-static inline int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
-					   pte_t *dst_pte,
+static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,

--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -152,14 +152,14 @@ extern void shmem_uncharge(struct inode *inode, long pages);
 #ifdef CONFIG_USERFAULTFD
 #ifdef CONFIG_SHMEM
-extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  bool zeropage, bool wp_copy,
				  struct page **pagep);
 #else /* !CONFIG_SHMEM */
-#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
+#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
			       src_addr, zeropage, wp_copy, pagep) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */

--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -56,7 +56,7 @@ enum mcopy_atomic_mode {
	MCOPY_ATOMIC_CONTINUE,
 };
-extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, bool wp_copy);
@@ -73,7 +73,7 @@ extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst
 extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);
-extern long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+extern long uffd_wp_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);
 /* mm helpers */

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6159,8 +6159,7 @@ out_mutex:
  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
  * with modifications for hugetlb pages.
  */
-int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
-			     pte_t *dst_pte,
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
@@ -6168,6 +6167,7 @@ int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
			     struct page **pagep,
			     bool wp_copy)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
	struct hstate *h = hstate_vma(dst_vma);
	struct address_space *mapping = dst_vma->vm_file->f_mapping;

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2429,8 +2429,7 @@ static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block
 }
 #ifdef CONFIG_USERFAULTFD
-int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
-			   pmd_t *dst_pmd,
+int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
			   struct vm_area_struct *dst_vma,
			   unsigned long dst_addr,
			   unsigned long src_addr,
@@ -2520,11 +2519,11 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
		goto out_release;
	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
-				      gfp & GFP_RECLAIM_MASK, dst_mm);
+				      gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
	if (ret)
		goto out_release;
-	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, wp_copy);
	if (ret)
		goto out_delete_from_cache;

--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -55,12 +55,13 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
  * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
  * and anon, and for both shared and private VMAs.
  */
-int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
 {
	int ret;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
@@ -127,8 +128,7 @@ out_unlock:
	return ret;
 }
-static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
-				 pmd_t *dst_pmd,
+static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
@@ -190,10 +190,10 @@ static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
	__SetPageUptodate(page);
	ret = -ENOMEM;
-	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;
-	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
@@ -204,8 +204,7 @@ out_release:
	goto out;
 }
-static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
-				     pmd_t *dst_pmd,
+static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
 {
@@ -217,7 +216,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
-	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
@@ -230,7 +229,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
-	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
@@ -240,8 +239,7 @@ out_unlock:
 }
 /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-static int mfill_atomic_pte_continue(struct mm_struct *dst_mm,
-				     pmd_t *dst_pmd,
+static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     bool wp_copy)
@@ -269,7 +267,7 @@ static int mfill_atomic_pte_continue(struct mm_struct *dst_mm,
		goto out_release;
	}
-	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;
@@ -310,7 +308,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
  * mfill_atomic processing for HUGETLB vmas. Note that this routine is
  * called with mmap_lock held, it will release mmap_lock before returning.
  */
-static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
+static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
@@ -318,6 +316,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
					      enum mcopy_atomic_mode mode,
					      bool wp_copy)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
@@ -411,7 +410,7 @@ retry:
			goto out_unlock;
		}
-		err = hugetlb_mfill_atomic_pte(dst_mm, dst_pte, dst_vma,
+		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page,
					       wp_copy);
@@ -463,17 +462,15 @@ out:
 }
 #else /* !CONFIG_HUGETLB_PAGE */
 /* fail at build time if gcc attempts to use this */
-extern ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
-				    struct vm_area_struct *dst_vma,
-				    unsigned long dst_start,
-				    unsigned long src_start,
-				    unsigned long len,
-				    enum mcopy_atomic_mode mode,
-				    bool wp_copy);
+extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
+				    unsigned long dst_start,
+				    unsigned long src_start,
+				    unsigned long len,
+				    enum mcopy_atomic_mode mode,
+				    bool wp_copy);
 #endif /* CONFIG_HUGETLB_PAGE */
-static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
-						pmd_t *dst_pmd,
+static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
@@ -484,7 +481,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
	ssize_t err;
	if (mode == MCOPY_ATOMIC_CONTINUE) {
-		return mfill_atomic_pte_continue(dst_mm, dst_pmd, dst_vma,
+		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, wp_copy);
	}
@@ -500,14 +497,14 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
-			err = mfill_atomic_pte_copy(dst_mm, dst_pmd, dst_vma,
+			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr, page,
						    wp_copy);
		else
-			err = mfill_atomic_pte_zeropage(dst_mm, dst_pmd,
+			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
-		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     wp_copy, page);
@@ -588,7 +585,7 @@ retry:
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
-		return mfill_atomic_hugetlb(dst_mm, dst_vma, dst_start,
+		return mfill_atomic_hugetlb(dst_vma, dst_start,
					    src_start, len, mcopy_mode,
					    wp_copy);
@@ -641,7 +638,7 @@ retry:
		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));
-		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();
@@ -710,7 +707,7 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst
			    mmap_changing, 0);
 }
-long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
+long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
 {
	unsigned int mm_cp_flags;
@@ -732,7 +729,7 @@ long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
-	tlb_gather_mmu(&tlb, dst_mm);
+	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);
@@ -788,7 +785,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);
-		err = uffd_wp_range(dst_mm, dst_vma, _start, _end - _start, enable_wp);
+		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
		/* Return 0 on success, <0 on failures */
		if (err < 0)