Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "11 fixes": MM fixes and one xz decompressor fix.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/debug.c: PageAnon() is true for PageKsm() pages
  mm/debug.c: __dump_page() prints an extra line
  mm/page_io.c: do not free shared swap slots
  mm/memory_hotplug: fix try_offline_node()
  mm,thp: recheck each page before collapsing file THP
  mm: slub: really fix slab walking for init_on_free
  mm: hugetlb: switch to css_tryget() in hugetlb_cgroup_charge_cgroup()
  mm: memcg: switch to css_tryget() in get_mem_cgroup_from_mm()
  lib/xz: fix XZ_DYNALLOC to avoid useless memory reallocations
  mm: fix trying to reclaim unevictable lru page when calling madvise_pageout
  mm: mempolicy: fix the wrong return value and potential pages leak of mbind
commit bec8b6e944
drivers/base/memory.c

@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
 	}
 	return ret;
 }
+
+struct for_each_memory_block_cb_data {
+	walk_memory_blocks_func_t func;
+	void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	struct for_each_memory_block_cb_data *cb_data = data;
+
+	return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+	struct for_each_memory_block_cb_data cb_data = {
+		.func = func,
+		.arg = arg,
+	};
+
+	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+				for_each_memory_block_cb);
+}
include/linux/memory.h

@@ -119,6 +119,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,
 			      void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
 #define CONFIG_MEM_BLOCK_SIZE	(PAGES_PER_SECTION<<PAGE_SHIFT)
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
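For context, a minimal sketch of how the new walker is meant to be used. The callback and wrapper below are hypothetical, not part of the patch; the real user added by this series is check_no_memblock_for_node_cb() in mm/memory_hotplug.c further down:

	/* Count all present memory blocks; a non-zero return aborts the walk. */
	static int count_block_cb(struct memory_block *mem, void *arg)
	{
		unsigned int *count = arg;

		(*count)++;
		return 0;
	}

	static unsigned int count_memory_blocks(void)
	{
		unsigned int count = 0;

		for_each_memory_block(&count, count_block_cb);
		return count;
	}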
lib/xz/xz_dec_lzma2.c

@@ -1146,6 +1146,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
 
 	if (DEC_IS_DYNALLOC(s->dict.mode)) {
 		if (s->dict.allocated < s->dict.size) {
+			s->dict.allocated = s->dict.size;
 			vfree(s->dict.buf);
 			s->dict.buf = vmalloc(s->dict.size);
 			if (s->dict.buf == NULL) {
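Before this one-line fix, s->dict.allocated was never updated, so the "allocated < size" test stayed true forever and every xz_dec_lzma2_reset() call freed and reallocated the dictionary even when the existing buffer was already large enough. A userspace sketch of the grow-only idiom being restored (hypothetical names, stdlib only):

	#include <stdlib.h>

	/* Grow-only buffer: record the capacity, or every call reallocates. */
	static void *ensure_capacity(void *buf, size_t *allocated, size_t want)
	{
		if (*allocated < want) {
			*allocated = want;	/* the line the fix adds */
			free(buf);		/* old contents not needed on reset */
			buf = malloc(want);
		}
		return buf;
	}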
mm/debug.c

@@ -67,28 +67,31 @@ void __dump_page(struct page *page, const char *reason)
 	 */
 	mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
-	pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx",
-		page, page_ref_count(page), mapcount,
-		page->mapping, page_to_pgoff(page));
-	if (PageCompound(page))
-		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
-	pr_cont("\n");
-	if (PageAnon(page))
-		pr_warn("anon ");
-	else if (PageKsm(page))
-		pr_warn("ksm ");
+	if (PageCompound(page))
+		pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
+			"index:%#lx compound_mapcount: %d\n",
+			page, page_ref_count(page), mapcount,
+			page->mapping, page_to_pgoff(page),
+			compound_mapcount(page));
+	else
+		pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
+			page, page_ref_count(page), mapcount,
+			page->mapping, page_to_pgoff(page));
+	if (PageKsm(page))
+		pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
+	else if (PageAnon(page))
+		pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
 	else if (mapping) {
-		pr_warn("%ps ", mapping->a_ops);
 		if (mapping->host && mapping->host->i_dentry.first) {
 			struct dentry *dentry;
 			dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
-			pr_warn("name:\"%pd\" ", dentry);
-		}
+			pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
+		} else
+			pr_warn("%ps\n", mapping->a_ops);
+		pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 	}
 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-	pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
-
 hex_only:
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
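Two separate fixes meet in this hunk. First, each pr_warn() starts a new printk record, and pr_cont() continuations can be split up by concurrent printk callers, so assembling one logical line from several calls produced a stray extra line; the new code emits each line with a single pr_warn(). Second, PageAnon() is also true for KSM pages (PageKsm() checks the same anon bit in page->mapping plus the movable bit), so the PageKsm() test must come first or KSM pages are misreported as anon. An illustration, with flags standing in for page->flags:

	pr_warn("anon ");			/* old: record 1 */
	pr_warn("flags: %#lx\n", flags);	/* old: record 2 -> extra line */
	pr_warn("anon flags: %#lx\n", flags);	/* new: one complete record */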
mm/hugetlb_cgroup.c

@@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 again:
 	rcu_read_lock();
 	h_cg = hugetlb_cgroup_from_task(current);
-	if (!css_tryget_online(&h_cg->css)) {
+	if (!css_tryget(&h_cg->css)) {
 		rcu_read_unlock();
 		goto again;
 	}
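css_tryget_online() fails once a cgroup has been taken offline (rmdir'ed), even while its reference count is still live. A task left in such a cgroup therefore never gets a successful tryget here and spins in the "goto again" loop. css_tryget() only requires a live reference count, so the charge proceeds against the offline css instead of livelocking; the get_mem_cgroup_from_mm() hunk below applies the same reasoning. Rough semantics (simplified; the real implementation sits on percpu_ref):

	/*
	 * css_tryget(css):        succeeds while the refcount is alive.
	 * css_tryget_online(css): additionally requires the css to be online,
	 *                         so it can fail forever for a dying cgroup.
	 */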
mm/khugepaged.c

@@ -1602,17 +1602,6 @@ static void collapse_file(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto xa_unlocked;
 			}
-		} else if (!PageUptodate(page)) {
-			xas_unlock_irq(&xas);
-			wait_on_page_locked(page);
-			if (!trylock_page(page)) {
-				result = SCAN_PAGE_LOCK;
-				goto xa_unlocked;
-			}
-			get_page(page);
-		} else if (PageDirty(page)) {
-			result = SCAN_FAIL;
-			goto xa_locked;
 		} else if (trylock_page(page)) {
 			get_page(page);
 			xas_unlock_irq(&xas);
@@ -1627,7 +1616,12 @@ static void collapse_file(struct mm_struct *mm,
 		 * without racing with truncate.
 		 */
 		VM_BUG_ON_PAGE(!PageLocked(page), page);
-		VM_BUG_ON_PAGE(!PageUptodate(page), page);
+
+		/* make sure the page is up to date */
+		if (unlikely(!PageUptodate(page))) {
+			result = SCAN_FAIL;
+			goto out_unlock;
+		}
 
 		/*
 		 * If file was truncated then extended, or hole-punched, before
@@ -1643,6 +1637,16 @@ static void collapse_file(struct mm_struct *mm,
 			goto out_unlock;
 		}
 
+		if (!is_shmem && PageDirty(page)) {
+			/*
+			 * khugepaged only works on read-only fd, so this
+			 * page is dirty because it hasn't been flushed
+			 * since first write.
+			 */
+			result = SCAN_FAIL;
+			goto out_unlock;
+		}
+
 		if (isolate_lru_page(page)) {
 			result = SCAN_DEL_PAGE_LRU;
 			goto out_unlock;
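The collapse path used to trust PageUptodate() and PageDirty() results sampled before the page was locked; by the time trylock_page() succeeded they could be stale. The fix deletes the unlocked checks and repeats them once the lock is held, the usual lock-then-revalidate idiom. Schematically (simplified, hypothetical error codes):

	if (!trylock_page(page))
		return -EAGAIN;
	/* Revalidate under the lock: anything sampled before trylock_page()
	 * may have changed in the meantime. */
	if (!PageUptodate(page) || (!is_shmem && PageDirty(page))) {
		unlock_page(page);
		return -EBUSY;
	}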
mm/madvise.c

@@ -363,8 +363,12 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		ClearPageReferenced(page);
 		test_and_clear_page_young(page);
 		if (pageout) {
-			if (!isolate_lru_page(page))
-				list_add(&page->lru, &page_list);
+			if (!isolate_lru_page(page)) {
+				if (PageUnevictable(page))
+					putback_lru_page(page);
+				else
+					list_add(&page->lru, &page_list);
+			}
 		} else
 			deactivate_page(page);
 huge_unlock:
@@ -441,8 +445,12 @@ regular_page:
 		ClearPageReferenced(page);
 		test_and_clear_page_young(page);
 		if (pageout) {
-			if (!isolate_lru_page(page))
-				list_add(&page->lru, &page_list);
+			if (!isolate_lru_page(page)) {
+				if (PageUnevictable(page))
+					putback_lru_page(page);
+				else
+					list_add(&page->lru, &page_list);
+			}
 		} else
 			deactivate_page(page);
 	}
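isolate_lru_page() can hand back a page that is unevictable (for instance one that was mlocked after MADV_PAGEOUT started), and the reclaim path that later consumes page_list expects every page it shrinks to be evictable. Such pages must therefore go back on the LRU via putback_lru_page() rather than onto the reclaim list. For reference, the queued pages are drained like this in v5.4 (sketch of the surrounding logic, not part of this hunk):

	if (pageout)
		reclaim_pages(&page_list);	/* expects evictable pages only */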
mm/memcontrol.c

@@ -960,7 +960,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 			if (unlikely(!memcg))
 				memcg = root_mem_cgroup;
 		}
-	} while (!css_tryget_online(&memcg->css));
+	} while (!css_tryget(&memcg->css));
 	rcu_read_unlock();
 	return memcg;
 }
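Same livelock as the hugetlb hunk above: if mm->owner sits in a memcg that has been offlined, css_tryget_online() fails on every iteration of this do/while under rcu_read_lock() and the lookup never completes. With css_tryget() the reference is pinned regardless of online state. Stripped-down shape of the loop (simplified from the hunk; the NULL/root fallback is omitted):

	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	} while (!css_tryget(&memcg->css));	/* was: css_tryget_online() */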
mm/memory_hotplug.c

@@ -1646,6 +1646,18 @@ static int check_cpu_on_node(pg_data_t *pgdat)
 	return 0;
 }
 
+static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
+{
+	int nid = *(int *)arg;
+
+	/*
+	 * If a memory block belongs to multiple nodes, the stored nid is not
+	 * reliable. However, such blocks are always online (e.g., cannot get
+	 * offlined) and, therefore, are still spanned by the node.
+	 */
+	return mem->nid == nid ? -EEXIST : 0;
+}
+
 /**
  * try_offline_node
  * @nid: the node ID
@@ -1658,25 +1670,24 @@ static int check_cpu_on_node(pg_data_t *pgdat)
 void try_offline_node(int nid)
 {
 	pg_data_t *pgdat = NODE_DATA(nid);
-	unsigned long start_pfn = pgdat->node_start_pfn;
-	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
-	unsigned long pfn;
+	int rc;
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
-		unsigned long section_nr = pfn_to_section_nr(pfn);
-
-		if (!present_section_nr(section_nr))
-			continue;
-
-		if (pfn_to_nid(pfn) != nid)
-			continue;
-
-		/*
-		 * some memory sections of this node are not removed, and we
-		 * can't offline node now.
-		 */
-		return;
-	}
+	/*
+	 * If the node still spans pages (especially ZONE_DEVICE), don't
+	 * offline it. A node spans memory after move_pfn_range_to_zone(),
+	 * e.g., after the memory block was onlined.
+	 */
+	if (pgdat->node_spanned_pages)
+		return;
+
+	/*
+	 * Especially offline memory blocks might not be spanned by the
+	 * node. They will get spanned by the node once they get onlined.
+	 * However, they link to the node in sysfs and can get onlined later.
+	 */
+	rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
+	if (rc)
+		return;
 
 	if (check_cpu_on_node(pgdat))
 		return;
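try_offline_node() now checks two independent conditions instead of walking PFNs: the node must span no pages (pgdat->node_spanned_pages == 0, which per the new comment especially matters for ZONE_DEVICE), and no memory block, online or offline, may still link to the node in sysfs. The callback encodes "found one" as -EEXIST, relying on the walk aborting at the first non-zero return. A sketch of that abort-on-first-match idiom (helper names hypothetical):

	static int match_nid_cb(struct memory_block *mem, void *arg)
	{
		return mem->nid == *(int *)arg ? -EEXIST : 0;
	}

	static bool node_has_memory_blocks(int nid)
	{
		/* The walk stops at the first non-zero return and passes it
		 * through, so "found" arrives as an error code. */
		return for_each_memory_block(&nid, match_nid_cb) == -EEXIST;
	}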
mm/mempolicy.c

@@ -672,7 +672,9 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
  * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
  *     specified.
  * 0 - queue pages successfully or no misplaced page.
- * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
+ * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
+ *         memory range specified by nodemask and maxnode points outside
+ *         your accessible address space (-EFAULT)
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -1286,7 +1288,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
 	if (ret < 0) {
-		err = -EIO;
+		err = ret;
 		goto up_out;
 	}
 
@@ -1305,10 +1307,12 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
 			err = -EIO;
-	} else
-		putback_movable_pages(&pagelist);
-
+	} else {
+up_out:
+		if (!list_empty(&pagelist))
+			putback_movable_pages(&pagelist);
+	}
 
 	up_write(&mm->mmap_sem);
 mpol_out:
 	mpol_put(new);
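queue_pages_range() can now report -EFAULT as well as -EIO, so do_mbind() must propagate the callee's return value instead of hardcoding -EIO. Relocating the up_out label also guarantees that pages already queued on &pagelist are put back on the error path, closing the leak where they were previously abandoned. A compact recap of the two changed call sites:

	ret = queue_pages_range(mm, start, end, nmask,
				flags | MPOL_MF_INVERT, &pagelist);
	if (ret < 0) {
		err = ret;	/* was: err = -EIO, masking -EFAULT */
		goto up_out;	/* up_out now drains &pagelist too */
	}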
mm/page_io.c

@@ -73,6 +73,7 @@ static void swap_slot_free_notify(struct page *page)
 {
 	struct swap_info_struct *sis;
 	struct gendisk *disk;
+	swp_entry_t entry;
 
 	/*
 	 * There is no guarantee that the page is in swap cache - the software
@@ -104,11 +105,10 @@ static void swap_slot_free_notify(struct page *page)
 	 * we again wish to reclaim it.
 	 */
 	disk = sis->bdev->bd_disk;
-	if (disk->fops->swap_slot_free_notify) {
-		swp_entry_t entry;
+	entry.val = page_private(page);
+	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
 		unsigned long offset;
 
-		entry.val = page_private(page);
 		offset = swp_offset(entry);
 
 		SetPageDirty(page);
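A swap slot can still be referenced by other processes (e.g. after fork), and swap_slot_free_notify() tells backends such as zram that they may discard the slot's contents. Doing that while __swap_count(entry) > 1 throws away data the other references still need, so the notification is now sent only when this page holds the last reference. Condensed, the guarded call becomes:

	/* Only the final user of the slot may let the backend drop it. */
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1)
		disk->fops->swap_slot_free_notify(sis->bdev, offset);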
mm/slub.c

@@ -1433,12 +1433,15 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 	void *old_tail = *tail ? *tail : *head;
 	int rsize;
 
-	if (slab_want_init_on_free(s)) {
-		void *p = NULL;
-		/* Head and tail of the reconstructed freelist */
-		*head = NULL;
-		*tail = NULL;
+	/* Head and tail of the reconstructed freelist */
+	*head = NULL;
+	*tail = NULL;
 
-		do {
-			object = next;
-			next = get_freepointer(s, object);
+	do {
+		object = next;
+		next = get_freepointer(s, object);
+
+		if (slab_want_init_on_free(s)) {
 			/*
 			 * Clear the object and the metadata, but don't touch
 			 * the redzone.
@@ -1448,29 +1451,8 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 							   : 0;
 			memset((char *)object + s->inuse, 0,
 			       s->size - s->inuse - rsize);
-			set_freepointer(s, object, p);
-			p = object;
-		} while (object != old_tail);
-	}
-
-/*
- * Compiler cannot detect this function can be removed if slab_free_hook()
- * evaluates to nothing. Thus, catch all relevant config debug options here.
- */
-#if defined(CONFIG_LOCKDEP)	||		\
-	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
-	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
-	defined(CONFIG_KASAN)
-
-	next = *head;
-
-	/* Head and tail of the reconstructed freelist */
-	*head = NULL;
-	*tail = NULL;
-
-	do {
-		object = next;
-		next = get_freepointer(s, object);
+		}
 		/* If object's reuse doesn't have to be delayed */
 		if (!slab_free_hook(s, object)) {
 			/* Move object to the new freelist */
@@ -1485,9 +1467,6 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 	*tail = NULL;
 
 	return *head != NULL;
-#else
-	return true;
-#endif
 }
 
 static void *setup_object(struct kmem_cache *s, struct page *page,
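Before this fix, slab_free_freelist_hook() did its work in two separate walks: an init_on_free loop that zeroed objects and rebuilt the freelist, and a second, #if-guarded walk that applied slab_free_hook() only when one of the listed debug options (LOCKDEP, KMEMLEAK, DEBUG_OBJECTS_FREE, KASAN) was compiled in; otherwise the function returned true without walking at all. Keeping the two walks consistent proved fragile, so the fix merges them into a single unconditional loop that zeroes each object when requested and then applies the free hook. Its shape, schematically (zero_object() and relink() are hypothetical stand-ins for the inlined code):

	do {
		object = next;
		next = get_freepointer(s, object);
		if (slab_want_init_on_free(s))
			zero_object(s, object);		/* memset body + metadata */
		if (!slab_free_hook(s, object))
			relink(s, object, head, tail);	/* keep on new freelist */
	} while (object != old_tail);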