Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "14 patches.

  Subsystems affected by this patch series: mm (migration, vmscan, slub,
  gup, memcg, hugetlbfs), mailmap, kbuild, reboot, watchdog, panic, and
  ocfs2"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  ocfs2: initialize ip_next_orphan
  panic: don't dump stack twice on warn
  hugetlbfs: fix anon huge page migration race
  mm: memcontrol: fix missing wakeup polling thread
  kernel/watchdog: fix watchdog_allowed_mask not used warning
  reboot: fix overflow parsing reboot cpu number
  Revert "kernel/reboot.c: convert simple_strtoul to kstrtoint"
  compiler.h: fix barrier_data() on clang
  mm/gup: use unpin_user_pages() in __gup_longterm_locked()
  mm/slub: fix panic in slab_alloc_node()
  mailmap: fix entry for Dmitry Baryshkov/Eremin-Solenikov
  mm/vmscan: fix NR_ISOLATED_FILE corruption on 64-bit
  mm/compaction: stop isolation if too many pages are isolated and we have pages to migrate
  mm/compaction: count pages and stop correctly during page isolation
commit e28c0d7c92

.mailmap | 5
@@ -82,7 +82,10 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
-Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Baryshkov <dbaryshkov@gmail.com>
+Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
+Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
+Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_eremin@mentor.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>

fs/ocfs2/super.c
@@ -1713,6 +1713,7 @@ static void ocfs2_inode_init_once(void *data)
         oi->ip_blkno = 0ULL;
         oi->ip_clusters = 0;
+        oi->ip_next_orphan = NULL;
 
         ocfs2_resv_init_once(&oi->ip_la_data_resv);

include/asm-generic/barrier.h
@@ -13,6 +13,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
 #include <asm/rwonce.h>
 
 #ifndef nop

include/linux/compiler-clang.h
@@ -60,12 +60,6 @@
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
 
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like ICC.
- */
-#define barrier() __asm__ __volatile__("" : : : "memory")
-
 #if __has_feature(shadow_call_stack)
 # define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
 #endif

include/linux/compiler-gcc.h
@@ -15,25 +15,6 @@
 # error Sorry, your version of GCC is too old - please use 4.9 or newer.
 #endif
 
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
  * shouldn't recognize the original var, and make assumptions about it.

include/linux/compiler.h
@@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 /* Optimization barrier */
 #ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
 #endif
 
 #ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
 #endif
 
 /* workaround for GCC PR82365 if needed */
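
The comment block above explains the clang behavior this macro works around: a buffer that never escapes can have its final stores optimized away unless the inline asm names @ptr as an input. Below is a minimal userspace sketch of that pattern, assuming a GCC or Clang toolchain; wipe_secret() and the local copy of the macro are illustrative and not taken from the kernel patch.

#include <string.h>

/* Same shape as the kernel macro: the "r"(ptr) input tells the compiler
 * the asm may read *ptr, so earlier stores to it must be kept. */
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")

static void wipe_secret(char *buf, size_t len)
{
        memset(buf, 0, len);    /* without the barrier, clang may drop this as a dead store */
        barrier_data(buf);      /* keeps the memset alive */
}

int main(void)
{
        char secret[32] = "hunter2";

        wipe_secret(secret, sizeof(secret));
        return 0;
}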

include/linux/memcontrol.h
@@ -900,12 +900,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
 static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                       enum memcg_memory_event event)
 {
+        bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+                          event == MEMCG_SWAP_FAIL;
+
         atomic_long_inc(&memcg->memory_events_local[event]);
-        cgroup_file_notify(&memcg->events_local_file);
+        if (!swap_event)
+                cgroup_file_notify(&memcg->events_local_file);
 
         do {
                 atomic_long_inc(&memcg->memory_events[event]);
-                cgroup_file_notify(&memcg->events_file);
+                if (swap_event)
+                        cgroup_file_notify(&memcg->swap_events_file);
+                else
+                        cgroup_file_notify(&memcg->events_file);
 
                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                         break;
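
The hunk routes MEMCG_SWAP_* events to swap_events_file so that a thread polling memory.swap.events is actually woken. A hedged userspace sketch of such a poller follows, assuming a cgroup v2 mount and an example group path; kernfs change notifications typically surface to poll(2) as POLLPRI.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Example path; any cgroup v2 group with the memory controller works. */
        int fd = open("/sys/fs/cgroup/example/memory.swap.events", O_RDONLY);
        struct pollfd pfd = { .fd = fd, .events = POLLPRI };

        if (fd < 0)
                return 1;

        /* Before the fix, swap max/fail events notified only memory.events,
         * so a poller parked here could miss them entirely. */
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
                printf("memory.swap.events changed\n");

        close(fd);
        return 0;
}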

kernel/panic.c
@@ -605,7 +605,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
                 panic("panic_on_warn set ...\n");
         }
 
-        dump_stack();
+        if (!regs)
+                dump_stack();
 
         print_irqtrace_events(current);

kernel/reboot.c
@@ -551,22 +551,22 @@ static int __init reboot_setup(char *str)
                         break;
 
                 case 's':
-                {
-                        int rc;
-
-                        if (isdigit(*(str+1))) {
-                                rc = kstrtoint(str+1, 0, &reboot_cpu);
-                                if (rc)
-                                        return rc;
-                        } else if (str[1] == 'm' && str[2] == 'p' &&
-                                        isdigit(*(str+3))) {
-                                rc = kstrtoint(str+3, 0, &reboot_cpu);
-                                if (rc)
-                                        return rc;
-                        } else
+                        if (isdigit(*(str+1)))
+                                reboot_cpu = simple_strtoul(str+1, NULL, 0);
+                        else if (str[1] == 'm' && str[2] == 'p' &&
+                                        isdigit(*(str+3)))
+                                reboot_cpu = simple_strtoul(str+3, NULL, 0);
+                        else
                                 *mode = REBOOT_SOFT;
+                        if (reboot_cpu >= num_possible_cpus()) {
+                                pr_err("Ignoring the CPU number in reboot= option. "
+                                       "CPU %d exceeds possible cpu number %d\n",
+                                       reboot_cpu, num_possible_cpus());
+                                reboot_cpu = 0;
+                                break;
+                        }
                         break;
-                }
+
                 case 'g':
                         *mode = REBOOT_GPIO;
                         break;
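
Both reboot patches land in this hunk: the revert restores simple_strtoul(), and the added range check keeps an oversized "reboot=s<N>" value from leaving a CPU index beyond the possible-CPU mask. The following is a small standalone C sketch of the truncation problem, with strtoul() standing in for simple_strtoul() and 8 standing in for num_possible_cpus(); it is an illustration, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *arg = "9999999999999";      /* as in reboot=s9999999999999 */
        unsigned int num_possible_cpus = 8;     /* stand-in value */
        int reboot_cpu = (int)strtoul(arg, NULL, 0);

        /* Truncating the unsigned long into an int leaves a garbage
         * (possibly negative) CPU index... */
        printf("parsed reboot_cpu = %d\n", reboot_cpu);

        /* ...so the added check falls back to CPU 0 rather than letting
         * later code index past the possible-CPU mask. */
        if (reboot_cpu < 0 || (unsigned int)reboot_cpu >= num_possible_cpus)
                reboot_cpu = 0;
        printf("clamped reboot_cpu = %d\n", reboot_cpu);
        return 0;
}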

kernel/watchdog.c
@@ -44,8 +44,6 @@ int __read_mostly soft_watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
 static int __read_mostly nmi_watchdog_available;
 
-static struct cpumask watchdog_allowed_mask __read_mostly;
-
 struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -162,6 +160,8 @@ static void lockup_detector_update_enable(void)
 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
 #endif
 
+static struct cpumask watchdog_allowed_mask __read_mostly;
+
 /* Global variables, exported for sysctl */
 unsigned int __read_mostly softlockup_panic =
                         CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

mm/compaction.c
@@ -817,6 +817,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
          * delay for some time until fewer pages are isolated
          */
         while (unlikely(too_many_isolated(pgdat))) {
+                /* stop isolation if there are still pages not migrated */
+                if (cc->nr_migratepages)
+                        return 0;
+
                 /* async migration should just abort */
                 if (cc->mode == MIGRATE_ASYNC)
                         return 0;
@@ -1012,8 +1016,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 isolate_success:
         list_add(&page->lru, &cc->migratepages);
-        cc->nr_migratepages++;
-        nr_isolated++;
+        cc->nr_migratepages += compound_nr(page);
+        nr_isolated += compound_nr(page);
 
         /*
          * Avoid isolating too much unless this block is being
@@ -1021,7 +1025,7 @@ isolate_success:
          * or a lock is contended. For contention, isolate quickly to
          * potentially remove one source of contention.
          */
-        if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
+        if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
             !cc->rescan && !cc->contended) {
                 ++low_pfn;
                 break;
@@ -1132,7 +1136,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                 if (!pfn)
                         break;
 
-                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+                if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
                         break;
         }
 
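
Because nr_migratepages and nr_isolated now advance by compound_nr(page), a single isolation of a compound page can step over an exact limit, which is why the two == tests above become >=. A standalone C sketch of the difference, using 32 as a stand-in for COMPACT_CLUSTER_MAX (the names here are only illustrative):

#include <stdbool.h>
#include <stdio.h>

#define CLUSTER_MAX 32  /* stand-in for COMPACT_CLUSTER_MAX */

/* Isolate pages in steps of 'step' (1 for base pages, 512 for a 2MB
 * compound page) and report whether the stop condition ever fires. */
static bool stops(unsigned int step, bool use_ge)
{
        unsigned int nr_migratepages = 0;

        for (int i = 0; i < 1000; i++) {
                nr_migratepages += step;
                if (use_ge ? nr_migratepages >= CLUSTER_MAX
                           : nr_migratepages == CLUSTER_MAX)
                        return true;
        }
        return false;
}

int main(void)
{
        /* a step of 512 jumps straight over 32, so '==' never triggers */
        printf("compound step, '==' stops: %d\n", stops(512, false));
        printf("compound step, '>=' stops: %d\n", stops(512, true));
        return 0;
}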

mm/gup.c | 14
@@ -1647,8 +1647,11 @@ check_again:
                 /*
                  * drop the above get_user_pages reference.
                  */
-                for (i = 0; i < nr_pages; i++)
-                        put_page(pages[i]);
+                if (gup_flags & FOLL_PIN)
+                        unpin_user_pages(pages, nr_pages);
+                else
+                        for (i = 0; i < nr_pages; i++)
+                                put_page(pages[i]);
 
                 if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
                         (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
@@ -1728,8 +1731,11 @@ static long __gup_longterm_locked(struct mm_struct *mm,
                         goto out;
 
                 if (check_dax_vmas(vmas_tmp, rc)) {
-                        for (i = 0; i < rc; i++)
-                                put_page(pages[i]);
+                        if (gup_flags & FOLL_PIN)
+                                unpin_user_pages(pages, rc);
+                        else
+                                for (i = 0; i < rc; i++)
+                                        put_page(pages[i]);
                         rc = -EOPNOTSUPP;
                         goto out;
                 }

mm/hugetlb.c | 90
@@ -1567,104 +1567,24 @@ int PageHeadHuge(struct page *page_head)
         return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
 }
 
-/*
- * Find address_space associated with hugetlbfs page.
- * Upon entry page is locked and page 'was' mapped although mapped state
- * could change. If necessary, use anon_vma to find vma and associated
- * address space. The returned mapping may be stale, but it can not be
- * invalid as page lock (which is held) is required to destroy mapping.
- */
-static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
-{
-        struct anon_vma *anon_vma;
-        pgoff_t pgoff_start, pgoff_end;
-        struct anon_vma_chain *avc;
-        struct address_space *mapping = page_mapping(hpage);
-
-        /* Simple file based mapping */
-        if (mapping)
-                return mapping;
-
-        /*
-         * Even anonymous hugetlbfs mappings are associated with an
-         * underlying hugetlbfs file (see hugetlb_file_setup in mmap
-         * code). Find a vma associated with the anonymous vma, and
-         * use the file pointer to get address_space.
-         */
-        anon_vma = page_lock_anon_vma_read(hpage);
-        if (!anon_vma)
-                return mapping; /* NULL */
-
-        /* Use first found vma */
-        pgoff_start = page_to_pgoff(hpage);
-        pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
-        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
-                                        pgoff_start, pgoff_end) {
-                struct vm_area_struct *vma = avc->vma;
-
-                mapping = vma->vm_file->f_mapping;
-                break;
-        }
-
-        anon_vma_unlock_read(anon_vma);
-        return mapping;
-}
-
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which allows us to find the mapping
- * even in the case of an anon page. However, locking order dictates
- * the i_mmap_rwsem be acquired BEFORE the page lock. This is hugetlbfs
- * specific. So, we first try to lock the sema while still holding the
- * page lock. If this works, great! If not, then we need to drop the
- * page lock and then acquire i_mmap_rwsem and reacquire page lock. Of
- * course, need to revalidate state along the way.
+ * Upon entry, the page is locked which means that page_mapping() is
+ * stable. Due to locking order, we can only trylock_write. If we can
+ * not get the lock, simply return NULL to caller.
  */
 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
 {
-        struct address_space *mapping, *mapping2;
+        struct address_space *mapping = page_mapping(hpage);
 
-        mapping = _get_hugetlb_page_mapping(hpage);
-retry:
         if (!mapping)
                 return mapping;
 
         /*
          * If no contention, take lock and return
          */
         if (i_mmap_trylock_write(mapping))
                 return mapping;
 
-        /*
-         * Must drop page lock and wait on mapping sema.
-         * Note: Once page lock is dropped, mapping could become invalid.
-         * As a hack, increase map count until we lock page again.
-         */
-        atomic_inc(&hpage->_mapcount);
-        unlock_page(hpage);
-        i_mmap_lock_write(mapping);
-        lock_page(hpage);
-        atomic_add_negative(-1, &hpage->_mapcount);
-
-        /* verify page is still mapped */
-        if (!page_mapped(hpage)) {
-                i_mmap_unlock_write(mapping);
-                return NULL;
-        }
-
-        /*
-         * Get address space again and verify it is the same one
-         * we locked. If not, drop lock and retry.
-         */
-        mapping2 = _get_hugetlb_page_mapping(hpage);
-        if (mapping2 != mapping) {
-                i_mmap_unlock_write(mapping);
-                mapping = mapping2;
-                goto retry;
-        }
-
-        return mapping;
+        return NULL;
 }
 
 pgoff_t __basepage_index(struct page *page)

mm/memory-failure.c
@@ -1057,27 +1057,25 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         if (!PageHuge(hpage)) {
                 unmap_success = try_to_unmap(hpage, ttu);
         } else {
-                /*
-                 * For hugetlb pages, try_to_unmap could potentially call
-                 * huge_pmd_unshare. Because of this, take semaphore in
-                 * write mode here and set TTU_RMAP_LOCKED to indicate we
-                 * have taken the lock at this higer level.
-                 *
-                 * Note that the call to hugetlb_page_mapping_lock_write
-                 * is necessary even if mapping is already set. It handles
-                 * ugliness of potentially having to drop page lock to obtain
-                 * i_mmap_rwsem.
-                 */
-                mapping = hugetlb_page_mapping_lock_write(hpage);
-
-                if (mapping) {
-                        unmap_success = try_to_unmap(hpage,
+                if (!PageAnon(hpage)) {
+                        /*
+                         * For hugetlb pages in shared mappings, try_to_unmap
+                         * could potentially call huge_pmd_unshare. Because of
+                         * this, take semaphore in write mode here and set
+                         * TTU_RMAP_LOCKED to indicate we have taken the lock
+                         * at this higer level.
+                         */
+                        mapping = hugetlb_page_mapping_lock_write(hpage);
+                        if (mapping) {
+                                unmap_success = try_to_unmap(hpage,
                                                      ttu|TTU_RMAP_LOCKED);
-                        i_mmap_unlock_write(mapping);
+                                i_mmap_unlock_write(mapping);
+                        } else {
+                                pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
+                                unmap_success = false;
+                        }
                 } else {
-                        pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n",
-                                pfn);
-                        unmap_success = false;
+                        unmap_success = try_to_unmap(hpage, ttu);
                 }
         }
         if (!unmap_success)

mm/migrate.c | 44
@@ -1328,34 +1328,38 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                 goto put_anon;
 
         if (page_mapped(hpage)) {
-                /*
-                 * try_to_unmap could potentially call huge_pmd_unshare.
-                 * Because of this, take semaphore in write mode here and
-                 * set TTU_RMAP_LOCKED to let lower levels know we have
-                 * taken the lock.
-                 */
-                mapping = hugetlb_page_mapping_lock_write(hpage);
-                if (unlikely(!mapping))
-                        goto unlock_put_anon;
+                bool mapping_locked = false;
+                enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK|
+                                        TTU_IGNORE_ACCESS;
 
-                try_to_unmap(hpage,
-                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
-                        TTU_RMAP_LOCKED);
+                if (!PageAnon(hpage)) {
+                        /*
+                         * In shared mappings, try_to_unmap could potentially
+                         * call huge_pmd_unshare. Because of this, take
+                         * semaphore in write mode here and set TTU_RMAP_LOCKED
+                         * to let lower levels know we have taken the lock.
+                         */
+                        mapping = hugetlb_page_mapping_lock_write(hpage);
+                        if (unlikely(!mapping))
+                                goto unlock_put_anon;
+
+                        mapping_locked = true;
+                        ttu |= TTU_RMAP_LOCKED;
+                }
+
+                try_to_unmap(hpage, ttu);
                 page_was_mapped = 1;
-                /*
-                 * Leave mapping locked until after subsequent call to
-                 * remove_migration_ptes()
-                 */
+
+                if (mapping_locked)
+                        i_mmap_unlock_write(mapping);
         }
 
         if (!page_mapped(hpage))
                 rc = move_to_new_page(new_hpage, hpage, mode);
 
-        if (page_was_mapped) {
+        if (page_was_mapped)
                 remove_migration_ptes(hpage,
-                        rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true);
-                i_mmap_unlock_write(mapping);
-        }
+                        rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
 
 unlock_put_anon:
         unlock_page(new_hpage);

mm/rmap.c
@@ -1413,9 +1413,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 /*
                  * If sharing is possible, start and end will be adjusted
                  * accordingly.
-                 *
-                 * If called for a huge page, caller must hold i_mmap_rwsem
-                 * in write mode as it is possible to call huge_pmd_unshare.
                  */
                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
                                                      &range.end);
@@ -1462,7 +1459,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
                 address = pvmw.address;
 
-                if (PageHuge(page)) {
+                if (PageHuge(page) && !PageAnon(page)) {
                         /*
                          * To call huge_pmd_unshare, i_mmap_rwsem must be
                          * held in write mode. Caller needs to explicitly

mm/slub.c
@@ -2852,7 +2852,7 @@ redo:
 
         object = c->freelist;
         page = c->page;
-        if (unlikely(!object || !node_match(page, node))) {
+        if (unlikely(!object || !page || !node_match(page, node))) {
                 object = __slab_alloc(s, gfpflags, node, addr, c);
         } else {
                 void *next_object = get_freepointer_safe(s, object);
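
The extra !page test leans on &&'s left-to-right short-circuit: the slow path is taken before node_match() can dereference a NULL c->page. A toy C sketch of the guard follows, with simplified stand-in types rather than the kernel's struct page and kmem_cache_cpu:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page { int nid; };       /* toy stand-in, not the kernel struct */

static bool node_match(const struct page *page, int node)
{
        return node < 0 || page->nid == node;   /* dereferences page */
}

int main(void)
{
        struct page *page = NULL;       /* what the race can leave behind */
        int dummy;
        void *object = &dummy;          /* non-NULL freelist entry */
        int node = 0;

        /* The added !page test short-circuits before node_match() can
         * touch a NULL page, mirroring the shape of the slab_alloc_node() fix. */
        if (!object || !page || !node_match(page, node))
                printf("take the slow path (__slab_alloc)\n");
        return 0;
}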

mm/vmscan.c
@@ -1516,7 +1516,8 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
         nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
                                         TTU_IGNORE_ACCESS, &stat, true);
         list_splice(&clean_pages, page_list);
-        mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
+        mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
+                            -(long)nr_reclaimed);
         /*
          * Since lazyfree pages are isolated from file LRU from the beginning,
          * they will rotate back to anonymous LRU in the end if it failed to
@@ -1526,7 +1527,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
         mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
                             stat.nr_lazyfree_fail);
         mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-                            -stat.nr_lazyfree_fail);
+                            -(long)stat.nr_lazyfree_fail);
         return nr_reclaimed;
 }
 
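
Both hunks fix the same C promotion trap: negating an unsigned int yields a huge unsigned value, which is then widened to the signed delta, so the node counter is bumped by roughly four billion instead of decremented. A few lines of standalone C show the effect on a 64-bit build; the variable name mirrors the hunk but the program is only an illustration:

#include <stdio.h>

int main(void)
{
        unsigned int nr_reclaimed = 5;

        /* Unary minus stays unsigned int (wraps), then widens to long:
         * on a 64-bit build this is 4294967291, not -5. */
        long wrong = -nr_reclaimed;

        /* Cast first, then negate: the counter really goes down by 5. */
        long right = -(long)nr_reclaimed;

        printf("wrong: %ld\nright: %ld\n", wrong, right);
        return 0;
}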