OpenCloudOS-Kernel/arch/mips/mm/mmap.c


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *               written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
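
/*
 * Worked example (illustrative only, not from the original source): with
 * 4kB pages and a hypothetical 16kB aliasing window (shm_align_mask ==
 * 0x3fff), COLOUR_ALIGN(0x1000, 1) first rounds 0x1000 up to the next
 * 16kB boundary (0x4000) and then adds the colour of the file offset
 * ((1 << PAGE_SHIFT) & 0x3fff == 0x1000), giving 0x5000.  The chosen
 * address therefore shares its offset within the aliasing window with
 * the mapped file offset, so both land on the same cache colour.
 */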
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
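
		/*
		 * The check below uses vm_start_gap() rather than
		 * vma->vm_start so that the hinted address also stays
		 * clear of the stack guard gap maintained between VMAs.
		 */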
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
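
	/*
	 * With this align_mask/align_offset pair, vm_unmapped_area()
	 * only returns addresses whose offset within the aliasing
	 * window matches that of the file offset - the same effect as
	 * COLOUR_ALIGN(), applied during the gap search instead.
	 */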

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}
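
/*
 * __virt_addr_valid() should return true only for addresses in the
 * unmapped kernel range between PAGE_OFFSET and MAP_BASE.  Without the
 * explicit range check below, a vmalloc address such as
 * 0xc000000000002000 would be masked by virt_to_phys()/CPHYSADDR()
 * down to the bogus physical address 0x2000, which with PHYS_OFFSET=0
 * corresponds to a valid PFN, so pfn_valid() alone would wrongly
 * report the address as valid.
 */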
bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);