Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "11 patches.

  Subsystems affected by this: misc, mm/hugetlb, mm/vmalloc, mm/misc,
  romfs, relay, uprobes, squashfs, mm/cma, mm/pagealloc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: fix core hung in free_pcppages_bulk()
  mm: include CMA pages in lowmem_reserve at boot
  squashfs: avoid bio_alloc() failure with 1Mbyte blocks
  uprobes: __replace_page() avoid BUG in munlock_vma_page()
  kernel/relay.c: fix memleak on destroy relay channel
  romfs: fix uninitialized memory leak in romfs_dev_read()
  mm/rodata_test.c: fix missing function declaration
  mm/vunmap: add cond_resched() in vunmap_pmd_range
  khugepaged: adjust VM_BUG_ON_MM() in __khugepaged_enter()
  hugetlb_cgroup: convert comma to semicolon
  mailmap: add Andi Kleen
commit 349111f050
@@ -32,6 +32,7 @@ Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
 Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
+Andi Kleen <ak@linux.intel.com> <ak@suse.de>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
 Andreas Herrmann <aherrman@de.ibm.com>
 Andrew Morton <akpm@linux-foundation.org>
@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
 	size_t limit;
 
 	limit = romfs_maxsize(sb);
-	if (pos >= limit)
+	if (pos >= limit || buflen > limit - pos)
 		return -EIO;
-	if (buflen > limit - pos)
-		buflen = limit - pos;
 
 #ifdef CONFIG_ROMFS_ON_MTD
 	if (sb->s_mtd)
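The old code clamped an oversized request to the image size, so the caller's buffer was only partially filled and its tail stayed uninitialized; the new check rejects such a request outright. A minimal userspace sketch of that bounds check, with a hypothetical function name:

#include <stddef.h>
#include <errno.h>

/* Hypothetical stand-in for romfs_dev_read()'s bounds check: reject reads
 * that start past the image or would run past its end, instead of silently
 * shortening them and leaving part of the destination buffer untouched. */
static int bounded_read_check(size_t pos, size_t buflen, size_t limit)
{
	if (pos >= limit || buflen > limit - pos)
		return -EIO;	/* the whole request must fit inside [0, limit) */
	return 0;		/* safe to copy exactly buflen bytes */
}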
@@ -87,7 +87,11 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
 	int error, i;
 	struct bio *bio;
 
-	bio = bio_alloc(GFP_NOIO, page_count);
+	if (page_count <= BIO_MAX_PAGES)
+		bio = bio_alloc(GFP_NOIO, page_count);
+	else
+		bio = bio_kmalloc(GFP_NOIO, page_count);
+
 	if (!bio)
 		return -ENOMEM;
 
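One plausible way a 1 Mbyte block trips the single bio_alloc() call, as an illustrative calculation only (assuming 4 KiB pages and a 256-entry BIO_MAX_PAGES inline limit, not taken from the patch itself): an extent that does not start on a page boundary spills into one extra page, so the worst case is block_size / PAGE_SIZE + 1 vectors.

#include <stdio.h>

#define PAGE_SIZE_ASSUMED	4096u	/* assumed page size */
#define BIO_MAX_PAGES_ASSUMED	256u	/* assumed inline bio_vec limit */

int main(void)
{
	unsigned int block_size = 1u << 20;	/* 1 MiB squashfs block */
	/* An extent that is not page-aligned touches one extra page. */
	unsigned int worst_case = block_size / PAGE_SIZE_ASSUMED + 1;

	printf("worst-case pages: %u, inline limit: %u\n",
	       worst_case, BIO_MAX_PAGES_ASSUMED);
	/* 257 > 256: the request no longer fits bio_alloc()'s inline
	 * vector, hence the bio_kmalloc() fallback in the patch above. */
	return 0;
}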
@@ -205,7 +205,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	try_to_free_swap(old_page);
 	page_vma_mapped_walk_done(&pvmw);
 
-	if (vma->vm_flags & VM_LOCKED)
+	if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
 		munlock_vma_page(old_page);
 	put_page(old_page);
 
@@ -197,6 +197,7 @@ free_buf:
 static void relay_destroy_channel(struct kref *kref)
 {
 	struct rchan *chan = container_of(kref, struct rchan, kref);
+	free_percpu(chan->buf);
 	kfree(chan);
 }
 
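The relay fix pairs the channel's per-CPU buffer-pointer array (set up with alloc_percpu()) with a free_percpu() before the channel itself is freed. A kernel-style sketch of that ownership pattern, with hypothetical structure and field names:

#include <linux/percpu.h>
#include <linux/slab.h>

/* Hypothetical object owning a per-CPU allocation: whatever was obtained
 * with alloc_percpu() must be released with free_percpu() before the
 * containing object goes away, or the per-CPU area leaks on every
 * teardown, as the old relay_destroy_channel() did. */
struct chan_like {
	int __percpu *stats;		/* allocated elsewhere with alloc_percpu(int) */
};

static void chan_like_destroy(struct chan_like *c)
{
	free_percpu(c->stats);		/* the step the old code was missing */
	kfree(c);
}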
@@ -655,7 +655,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
 	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
 	cft->private = MEMFILE_PRIVATE(idx, 0);
 	cft->seq_show = hugetlb_events_show;
-	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]),
+	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
 	cft->flags = CFTYPE_NOT_ON_ROOT;
 
 	/* Add the events.local file */
@@ -664,7 +664,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
 	cft->private = MEMFILE_PRIVATE(idx, 0);
 	cft->seq_show = hugetlb_events_local_show;
 	cft->file_offset = offsetof(struct hugetlb_cgroup,
-					events_local_file[idx]),
+					events_local_file[idx]);
 	cft->flags = CFTYPE_NOT_ON_ROOT;
 
 	/* NULL terminate the last cft */
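On the comma-to-semicolon change above: because of the C comma operator, the old line still performed both assignments, so this is a cleanup of intent rather than a behavioural fix. A standalone illustration:

#include <stdio.h>

/* With a trailing comma the two assignments become one expression
 * statement joined by the comma operator.  Both still execute, so the
 * old hugetlb_cgroup code was not functionally broken, but a semicolon
 * states the intent and keeps each assignment a statement of its own. */
int main(void)
{
	int off, flags;

	off = 8, flags = 1;	/* comma operator: both assignments run */
	printf("off=%d flags=%d\n", off, flags);

	off = 8;		/* the intended form: two statements */
	flags = 1;
	return 0;
}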
@@ -466,7 +466,7 @@ int __khugepaged_enter(struct mm_struct *mm)
 		return -ENOMEM;
 
 	/* __khugepaged_exit() must not run from under us */
-	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 		free_mm_slot(mm_slot);
 		return 0;
@@ -1302,6 +1302,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	struct page *page, *tmp;
 	LIST_HEAD(head);
 
+	/*
+	 * Ensure proper count is passed which otherwise would stuck in the
+	 * below while (list_empty(list)) loop.
+	 */
+	count = min(pcp->count, count);
 	while (count) {
 		struct list_head *list;
 
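Why the clamp matters: free_pcppages_bulk() round-robins over the per-migratetype lists and spins in the while (list_empty(list)) loop until it finds a non-empty one, so asking it to free more pages than the pcp actually holds can loop forever. A hypothetical userspace miniature of that failure mode and of the min() clamp (names and structure invented for illustration):

#include <stdio.h>

#define NLISTS 3

/* Drain 'count' items spread over several lists.  Without the clamp,
 * a count larger than the number of items present leaves every list
 * empty while count is still non-zero, and the inner loop never exits. */
static void drain(int lens[NLISTS], int total, int count)
{
	int idx = 0;

	/* The fix: never try to drain more than is actually there. */
	count = count < total ? count : total;

	while (count) {
		/* pick the next non-empty list, round-robin */
		while (lens[idx] == 0)
			idx = (idx + 1) % NLISTS;	/* spins forever if all are empty */
		lens[idx]--;
		count--;
		idx = (idx + 1) % NLISTS;
	}
}

int main(void)
{
	int lens[NLISTS] = { 2, 1, 0 };
	int total = lens[0] + lens[1] + lens[2];

	drain(lens, total, 5);	/* asks for more than exists; the clamp saves us */
	printf("remaining: %d %d %d\n", lens[0], lens[1], lens[2]);
	return 0;
}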
@@ -7888,7 +7893,7 @@ int __meminit init_per_zone_wmark_min(void)
 
 	return 0;
 }
-core_initcall(init_per_zone_wmark_min)
+postcore_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
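Initcalls run level by level, and core_initcall hooks complete before postcore_initcall hooks, so moving init_per_zone_wmark_min() one level later lets work still registered at the core level finish first, presumably so the watermark and lowmem_reserve setup in the "mm: include CMA pages in lowmem_reserve at boot" patch sees CMA pages already in place. A minimal kernel-style sketch of that ordering, with hypothetical function names:

#include <linux/init.h>
#include <linux/printk.h>

/* core_initcall hooks run before postcore_initcall hooks, so moving a
 * setup function from one level to the next guarantees it runs after
 * everything still registered at the earlier level. */
static int __init runs_first(void)
{
	pr_info("core_initcall level\n");
	return 0;
}
core_initcall(runs_first);

static int __init runs_second(void)
{
	pr_info("postcore_initcall level\n");
	return 0;
}
postcore_initcall(runs_second);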
@@ -7,6 +7,7 @@
  */
 #define pr_fmt(fmt) "rodata_test: " fmt
 
+#include <linux/rodata_test.h>
 #include <linux/uaccess.h>
 #include <asm/sections.h>
 
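Including <linux/rodata_test.h> gives mm/rodata_test.c a prior declaration of the function it defines, which is what silences the missing-prototype warning. A standalone sketch of the same situation, with hypothetical file and function names (build with -Wmissing-prototypes):

/* proto_demo.c (hypothetical)
 *
 * Without the declaration below, defining do_work() as an external
 * function triggers "no previous prototype for 'do_work'".  Including
 * the header that declares it (inlined here as a plain declaration)
 * is the usual fix, and is what mm/rodata_test.c gains from
 * <linux/rodata_test.h>. */
void do_work(void);		/* normally lives in the header */

void do_work(void)
{
	/* ... */
}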
@@ -104,6 +104,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		vunmap_pte_range(pmd, addr, next, mask);
+
+		cond_resched();
 	} while (pmd++, addr = next, addr != end);
 }
 
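The added cond_resched() follows the usual pattern for long, CPU-bound kernel loops: give the scheduler a chance to run between iterations so a huge vunmap range cannot monopolize a CPU on non-preemptible configurations. A minimal kernel-style sketch of the pattern, with hypothetical types and helpers:

#include <linux/sched.h>

struct item { void *payload; };			/* hypothetical per-entry state */

static void teardown_one(struct item *it)	/* hypothetical helper */
{
	it->payload = NULL;
}

static void teardown_many(struct item *items, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		teardown_one(&items[i]);
		/* Same idea as the vunmap_pmd_range() change: yield if a
		 * reschedule is due before the next iteration. */
		cond_resched();
	}
}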