commit cfc79ae844
Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib/test_kmod.c: fix limit check on number of test devices created
  selftests/vm/run_vmtests: adjust hugetlb size according to nr_cpus
  mm/page_alloc: fix memmap_init_zone pageblock alignment
  mm/memblock.c: hardcode the end_pfn being -1
  mm/gup.c: teach get_user_pages_unlocked to handle FOLL_NOWAIT
  lib/bug.c: exclude non-BUG/WARN exceptions from report_bug()
  bug: use %pB in BUG and stack protector failure
  hugetlb: fix surplus pages accounting
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
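Both %p-to-%pB conversions above (in __stack_chk_fail() and report_bug()) make the logged address print as symbol+offset instead of an opaque pointer value. As a rough user-space analogue only, the hedged sketch below uses dladdr() as a stand-in for the kernel's %pB symbolization; the sample file name and the -ldl link flag are assumptions about a typical glibc toolchain, not part of the kernel change.

/*
 * User-space analogue only: dladdr() stands in for the kernel's %pB.
 * %p shows an opaque pointer; %pB shows symbol+offset, which is what
 * makes the stack-protector panic and BUG report actionable.
 * Build (older glibc needs -ldl):  cc pb_demo.c -o pb_demo -ldl
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	void *addr = (void *)dladdr;	/* any address inside a known symbol */
	Dl_info info;

	printf("what %%p  would show: %p\n", addr);

	if (dladdr(addr, &info) && info.dli_sname)
		printf("what %%pB would show: %s+0x%lx\n", info.dli_sname,
		       (unsigned long)((char *)addr - (char *)info.dli_saddr));
	return 0;
}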
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
-	if (unlikely(num_test_devs + 1) < 0) {
+	if (num_test_devs + 1 == INT_MAX) {
 		pr_err("reached limit of number of test devices\n");
 		goto out;
 	}
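The old lib/test_kmod.c check above could never fire: unlikely() reduces its argument to 0 or 1, so the "< 0" comparison was applied to that 0/1 result rather than to the device count. A minimal user-space sketch of why, with the usual __builtin_expect idiom supplied here as an assumed stand-in for the kernel's unlikely() macro:

#include <limits.h>
#include <stdio.h>

/* Assumed stand-in for the kernel's unlikely(); evaluates to 0 or 1. */
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	int num_test_devs = INT_MAX - 1;

	/* Old check: compares unlikely()'s 0/1 result, so it is never true. */
	if (unlikely(num_test_devs + 1) < 0)
		puts("old check fired");		/* unreachable */

	/* Fixed check: compares the count itself against the limit. */
	if (num_test_devs + 1 == INT_MAX)
		puts("new check fired at the limit");	/* prints */

	return 0;
}

Compiled with gcc or clang, only the second message is printed.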
mm/gup.c
@@ -516,7 +516,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	}
 
 	if (ret & VM_FAULT_RETRY) {
-		if (nonblocking)
+		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 			*nonblocking = 0;
 		return -EBUSY;
 	}
@@ -890,7 +890,10 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 			break;
 		}
 		if (*locked) {
-			/* VM_FAULT_RETRY didn't trigger */
+			/*
+			 * VM_FAULT_RETRY didn't trigger or it was a
+			 * FOLL_NOWAIT.
+			 */
 			if (!pages_done)
 				pages_done = ret;
 			break;
@@ -1583,7 +1583,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 		page = NULL;
 	} else {
 		h->surplus_huge_pages++;
-		h->nr_huge_pages_node[page_to_nid(page)]++;
+		h->surplus_huge_pages_node[page_to_nid(page)]++;
 	}
 
 out_unlock:
@@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 	struct memblock_type *type = &memblock.memory;
 	unsigned int right = type->cnt;
 	unsigned int mid, left = 0;
-	phys_addr_t addr = PFN_PHYS(pfn + 1);
+	phys_addr_t addr = PFN_PHYS(++pfn);
 
 	do {
 		mid = (right + left) / 2;
@@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
				  type->regions[mid].size))
 			left = mid + 1;
 		else {
-			/* addr is within the region, so pfn + 1 is valid */
-			return min(pfn + 1, max_pfn);
+			/* addr is within the region, so pfn is valid */
+			return pfn;
 		}
 	} while (left < right);
 
 	if (right == type->cnt)
-		return max_pfn;
+		return -1UL;
 	else
-		return min(PHYS_PFN(type->regions[right].base), max_pfn);
+		return PHYS_PFN(type->regions[right].base);
 }
 
 /**
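The two mm/memblock.c hunks change memblock_next_valid_pfn() so it no longer clamps its result to a caller-supplied max_pfn and instead returns -1UL when no memory region lies at or after the requested pfn; the caller's bound is applied at the call site (see the mm/page_alloc.c hunk below, which masks the returned value itself). The following is a hypothetical, self-contained user-space model of that binary search over sorted regions; struct region, the PFN_PHYS/PHYS_PFN macros and the example region table are simplified stand-ins, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_PHYS(pfn)	((unsigned long long)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)	((unsigned long)((addr) >> PAGE_SHIFT))

/* Simplified stand-in for memblock regions (sorted, non-overlapping). */
struct region {
	unsigned long long base;
	unsigned long long size;
};

static const struct region regions[] = {
	{ 0x0000000ULL, 0x2000000ULL },		/* pfns 0x0000 - 0x1fff */
	{ 0x8000000ULL, 0x1000000ULL },		/* pfns 0x8000 - 0x8fff */
};
static const unsigned int cnt = sizeof(regions) / sizeof(regions[0]);

/* Model of memblock_next_valid_pfn(): next valid pfn after 'pfn', or -1UL. */
static unsigned long next_valid_pfn(unsigned long pfn)
{
	unsigned int right = cnt, mid, left = 0;
	unsigned long long addr = PFN_PHYS(++pfn);

	do {
		mid = (right + left) / 2;

		if (addr < regions[mid].base)
			right = mid;
		else if (addr >= regions[mid].base + regions[mid].size)
			left = mid + 1;
		else
			return pfn;	/* addr falls inside a region */
	} while (left < right);

	if (right == cnt)
		return -1UL;		/* past the last region */
	return PHYS_PFN(regions[right].base);
}

int main(void)
{
	printf("next after 0x1000 -> %#lx\n", next_valid_pfn(0x1000)); /* 0x1001 */
	printf("next after 0x1fff -> %#lx\n", next_valid_pfn(0x1fff)); /* 0x8000 */
	printf("next after 0x9000 -> %#lx\n", next_valid_pfn(0x9000)); /* -1UL  */
	return 0;
}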
@@ -5359,9 +5359,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			/*
 			 * Skip to the pfn preceding the next valid one (or
 			 * end_pfn), such that we hit a valid pfn (or end_pfn)
-			 * on our next iteration of the loop.
+			 * on our next iteration of the loop. Note that it needs
+			 * to be pageblock aligned even when the region itself
+			 * is not. move_freepages_block() can shift ahead of
+			 * the valid region but still depends on correct page
+			 * metadata.
 			 */
-			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+			pfn = (memblock_next_valid_pfn(pfn, end_pfn) &
+					~(pageblock_nr_pages-1)) - 1;
 #endif
 			continue;
 		}
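The new expression in the memmap_init_zone() hunk rounds the looked-up pfn down to the start of its pageblock before the loop's pfn++ advances onto it, so that pages move_freepages_block() may walk over are not left uninitialized. A hedged worked example of just the masking arithmetic; the pageblock size of 512 pages (2MB with 4KB pages) and the sample pfn are assumed illustrative values, not taken from the patch.

#include <stdio.h>

int main(void)
{
	/* Assumed illustrative value: 512 4KB pages per 2MB pageblock. */
	const unsigned long pageblock_nr_pages = 512;
	unsigned long next_valid = 0x8123;	/* pretend lookup result */

	/* Same arithmetic as the hunk: round down to the pageblock start,
	 * then step back one so the loop's pfn++ lands exactly on it. */
	unsigned long aligned = next_valid & ~(pageblock_nr_pages - 1);
	unsigned long pfn = aligned - 1;

	printf("lookup returned pfn %#lx\n", next_valid);	/* 0x8123 */
	printf("pageblock start    %#lx\n", aligned);		/* 0x8000 */
	printf("loop resumes at    %#lx (then pfn++)\n", pfn);	/* 0x7fff */
	return 0;
}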
@@ -2,25 +2,33 @@
 # SPDX-License-Identifier: GPL-2.0
 #please run as root
 
-#we need 256M, below is the size in kB
-needmem=262144
 mnt=./huge
 exitcode=0
 
-#get pagesize and freepages from /proc/meminfo
+#get huge pagesize and freepages from /proc/meminfo
 while read name size unit; do
 	if [ "$name" = "HugePages_Free:" ]; then
 		freepgs=$size
 	fi
 	if [ "$name" = "Hugepagesize:" ]; then
-		pgsize=$size
+		hpgsize_KB=$size
 	fi
 done < /proc/meminfo
 
+# Simple hugetlbfs tests have a hardcoded minimum requirement of
+# huge pages totaling 256MB (262144KB) in size. The userfaultfd
+# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
+# both of these requirements into account and attempt to increase
+# number of huge pages available.
+nr_cpus=$(nproc)
+hpgsize_MB=$((hpgsize_KB / 1024))
+half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
+needmem_KB=$((half_ufd_size_MB * 2 * 1024))
+
 #set proper nr_hugepages
-if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then
+if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
 	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
-	needpgs=`expr $needmem / $pgsize`
+	needpgs=$((needmem_KB / hpgsize_KB))
 	tries=2
 	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
 		lackpgs=$(( $needpgs - $freepgs ))
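The run_vmtests sizing arithmetic above rounds the per-side requirement (nr_cpus * hpgsize_MB) up to a multiple of 128MB, so the script never asks for less than the original fixed 256MB total but scales past it on large machines. A hedged worked example of the same integer arithmetic; the 8-CPU count and 2MB (2048KB) huge page size are assumed sample values.

#include <stdio.h>

int main(void)
{
	/* Assumed sample values: 8 CPUs, 2MB huge pages (2048KB). */
	long nr_cpus = 8;
	long hpgsize_KB = 2048;

	long hpgsize_MB = hpgsize_KB / 1024;			/* 2      */
	long half_ufd_size_MB =
		((nr_cpus * hpgsize_MB + 127) / 128) * 128;	/* 128    */
	long needmem_KB = half_ufd_size_MB * 2 * 1024;		/* 262144 */
	long needpgs = needmem_KB / hpgsize_KB;			/* 128    */

	printf("half_ufd_size_MB = %ld\n", half_ufd_size_MB);
	printf("needmem_KB       = %ld\n", needmem_KB);
	printf("needpgs          = %ld\n", needpgs);
	return 0;
}

With, say, 96 CPUs the same formula rounds 192MB up to 256MB per side, so the total requirement grows to 512MB instead of the old fixed 256MB.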
@@ -107,8 +115,9 @@ fi
 echo "---------------------------"
 echo "running userfaultfd_hugetlb"
 echo "---------------------------"
-# 256MB total huge pages == 128MB src and 128MB dst
-./userfaultfd hugetlb 128 32 $mnt/ufd_test_file
+# Test requires source and destination huge pages. Size of source
+# (half_ufd_size_MB) is passed as argument to test.
+./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
 if [ $? -ne 0 ]; then
 	echo "[FAIL]"
 	exitcode=1