mm/vma: introduce VM_ACCESS_FLAGS

There are many places where all basic VMA access flags (read, write,
exec) are initialized or checked against as a group.  One such example
is during page fault.  The existing vma_is_accessible() wrapper already
creates the notion of VMA accessibility as a group of access permissions.

Hence, let's just create VM_ACCESS_FLAGS (VM_READ|VM_WRITE|VM_EXEC), which
will not only reduce code duplication but also extend the VMA
accessibility concept in general.

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rob Springer <rspringer@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Link: http://lkml.kernel.org/r/1583391014-8170-3-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Anshuman Khandual 2020-04-10 14:33:09 -07:00 committed by Linus Torvalds
parent c62da0c35d
commit 6cb4d9a287
11 changed files with 16 additions and 12 deletions

View File

@@ -189,7 +189,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  */
 static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mask = VM_ACCESS_FLAGS;
 	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		mask = VM_WRITE;

View File

@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault, major = 0;
-	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned long vm_flags = VM_ACCESS_FLAGS;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	if (kprobe_page_fault(regs, esr))

View File

@@ -79,7 +79,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
 	struct vm_area_struct *vma;
 	int si_code;
 	vm_fault_t fault;
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mask = VM_ACCESS_FLAGS;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
 	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);

View File

@@ -315,7 +315,7 @@ int __execute_only_pkey(struct mm_struct *mm)
 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
 {
 	/* Do this check first since the vm_flags should be hot */
-	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
+	if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
 		return false;
 	return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);

View File

@@ -580,7 +580,7 @@ void do_dat_exception(struct pt_regs *regs)
 	int access;
 	vm_fault_t fault;
-	access = VM_READ | VM_EXEC | VM_WRITE;
+	access = VM_ACCESS_FLAGS;
 	fault = do_exception(regs, access);
 	if (unlikely(fault))
 		do_fault_error(regs, access, fault);

View File

@@ -149,7 +149,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  */
 static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mask = VM_ACCESS_FLAGS;
 	if (!(fsr ^ 0x12))	/* write? */
 		mask = VM_WRITE;

View File

@@ -63,7 +63,7 @@ int __execute_only_pkey(struct mm_struct *mm)
 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
 {
 	/* Do this check first since the vm_flags should be hot */
-	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
+	if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
 		return false;
 	if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
 		return false;

View File

@@ -689,7 +689,7 @@ static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
 	/* Make sure that no wrong flags are set. */
 	requested_permissions =
-		(vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
+		(vma->vm_flags & VM_ACCESS_FLAGS);
 	if (requested_permissions & ~(bar_permissions)) {
 		dev_dbg(gasket_dev->dev,
 			"Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",

View File

@@ -369,6 +369,10 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+/* VMA basic access permission flags */
+#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  */
@@ -646,7 +650,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
 static inline bool vma_is_accessible(struct vm_area_struct *vma)
 {
-	return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+	return vma->vm_flags & VM_ACCESS_FLAGS;
 }
 #ifdef CONFIG_SHMEM

View File

@@ -1224,7 +1224,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
 	return a->vm_end == b->vm_start &&
 		mpol_equal(vma_policy(a), vma_policy(b)) &&
 		a->vm_file == b->vm_file &&
-		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
+		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
 }

View File

@@ -419,7 +419,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	 */
 	if (arch_has_pfn_modify_check() &&
 	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
+	    (newflags & VM_ACCESS_FLAGS) == 0) {
 		pgprot_t new_pgprot = vm_get_page_prot(newflags);
 		error = walk_page_range(current->mm, start, end,
@@ -598,7 +598,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 		newflags |= (vma->vm_flags & ~mask_off_old_flags);
 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
-		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
+		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
 			error = -EACCES;
 			goto out;
 		}