Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "16 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: don't defer struct page initialization for Xen pv guests
  lib/Kconfig.debug: enable RUNTIME_TESTING_MENU
  vmalloc: fix __GFP_HIGHMEM usage for vmalloc_32 on 32b systems
  selftests/memfd: add run_fuse_test.sh to TEST_FILES
  bug.h: work around GCC PR82365 in BUG()
  mm/swap.c: make functions and their kernel-doc agree (again)
  mm/zpool.c: zpool_evictable: fix mismatch in parameter name and kernel-doc
  ida: do zeroing in ida_pre_get()
  mm, swap, frontswap: fix THP swap if frontswap enabled
  certs/blacklist_nohashes.c: fix const confusion in certs blacklist
  kernel/relay.c: limit kmalloc size to KMALLOC_MAX_SIZE
  mm, mlock, vmscan: no more skipping pagevecs
  mm: memcontrol: fix NR_WRITEBACK leak in memcg and system stats
  Kbuild: always define endianess in kconfig.h
  include/linux/sched/mm.h: re-inline mmdrop()
  tools: fix cross-compile var clobbering
commit 238ca35707
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
 #define BUG() do { \
 	pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
 	dump_stack(); \
+	barrier_before_unreachable(); \
 	__builtin_trap(); \
 } while (0)
 
 #define HAVE_ARCH_BUG
@@ -44,18 +44,25 @@ struct bug_frame {
  * not be used like this with newer versions of gcc.
  */
 #define BUG()								\
+do {									\
 	__asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
 			      "movu.w " __stringify(__LINE__) ",$r0\n\t"\
 			      "jump 0f\n\t"				\
 			      ".section .rodata\n"			\
 			      "0:\t.string \"" __FILE__ "\"\n\t"	\
-			      ".previous")
+			      ".previous");				\
+	unreachable();							\
+} while (0)
 #endif
 
 #else
 
 /* This just causes an oops. */
-#define BUG() (*(int *)0 = 0)
+#define BUG()								\
+do {									\
+	barrier_before_unreachable();					\
+	__builtin_trap();						\
+} while (0)
 
 #endif
@@ -4,7 +4,11 @@
 
 #ifdef CONFIG_BUG
 #define ia64_abort()	__builtin_trap()
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+#define BUG() do {						\
+	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__);	\
+	barrier_before_unreachable();				\
+	ia64_abort();						\
+} while (0)
 
 /* should this BUG be made generic? */
 #define HAVE_ARCH_BUG
@@ -8,16 +8,19 @@
 #ifndef CONFIG_SUN3
 #define BUG() do { \
 	pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	barrier_before_unreachable(); \
 	__builtin_trap(); \
 } while (0)
 #else
 #define BUG() do { \
 	pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	barrier_before_unreachable(); \
 	panic("BUG!"); \
 } while (0)
 #endif
 #else
 #define BUG() do { \
+	barrier_before_unreachable(); \
 	__builtin_trap(); \
 } while (0)
 #endif
@@ -9,10 +9,14 @@
 void do_BUG(const char *file, int line);
 #define BUG() do {					\
 	do_BUG(__FILE__, __LINE__);			\
+	barrier_before_unreachable();			\
 	__builtin_trap();				\
 } while (0)
 #else
-#define BUG()		__builtin_trap()
+#define BUG() do {					\
+	barrier_before_unreachable();			\
+	__builtin_trap();				\
+} while (0)
 #endif
 
 #define HAVE_ARCH_BUG
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "blacklist.h"
 
-const char __initdata *const blacklist_hashes[] = {
+const char __initconst *const blacklist_hashes[] = {
 	NULL
 };
@@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
+	/* THP isn't supported */
+	if (PageTransHuge(page))
+		return -1;
+
 	if (pool < 0)
 		return -1;
 	if (ind64 != ind)
@@ -52,6 +52,7 @@ struct bug_entry {
 #ifndef HAVE_ARCH_BUG
 #define BUG() do { \
 	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+	barrier_before_unreachable(); \
 	panic("BUG!"); \
 } while (0)
 #endif
@@ -207,6 +207,15 @@
 #endif
 #endif
 
+/*
+ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuse the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * Adding an empty inline assembly before it works around the problem
+ */
+#define barrier_before_unreachable() asm volatile("")
+
 /*
  * Mark a position in code as unreachable. This can be used to
  * suppress control flow warnings after asm blocks that transfer
@@ -217,7 +226,11 @@
  * unreleased. Really, we need to have autoconf for the kernel.
  */
 #define unreachable() \
-	do { annotate_unreachable(); __builtin_unreachable(); } while (0)
+	do {					\
+		annotate_unreachable();		\
+		barrier_before_unreachable();	\
+		__builtin_unreachable();	\
+	} while (0)
 
 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
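Background for the hunk above: the GCC bug being worked around makes each cold, trapping path keep its own stack slots for large locals instead of sharing them, so frames balloon. A minimal standalone sketch of the idea, with illustrative names and sizes that are not taken from the kernel:

#include <stdio.h>

/* same trick as the kernel's barrier_before_unreachable() */
#define my_barrier_before_unreachable() asm volatile("")

static void report_and_trap(const char *file, int line)
{
	char buf[256];	/* large local in a cold, never-returning path */

	snprintf(buf, sizeof(buf), "failure at %s:%d\n", file, line);
	fputs(buf, stderr);
	my_barrier_before_unreachable();	/* the empty asm is the workaround */
	__builtin_trap();			/* control never returns */
}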
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define barrier_data(ptr) barrier()
 #endif
 
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
+#endif
+
 /* Unreachable code */
 #ifdef CONFIG_STACK_VALIDATION
 /*
@@ -4,6 +4,12 @@
 
 #include <generated/autoconf.h>
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
 #define __ARG_PLACEHOLDER_1 0,
 #define __take_second_arg(__ignored, val, ...) val
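What the new macros buy (a hedged sketch, not from the patch): any file that includes kconfig.h can now branch on byte order with the preprocessor, without pulling in the uapi byteorder headers first. The helper below is made up for illustration:

#include <linux/kconfig.h>

/* hypothetical helper: name the build's byte order */
static inline const char *byteorder_name(void)
{
#ifdef __BIG_ENDIAN
	return "big-endian";	/* CONFIG_CPU_BIG_ENDIAN was set */
#else
	return "little-endian";
#endif
}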
@@ -523,9 +523,11 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
 				   int idx, int val)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	__mod_memcg_state(memcg, idx, val);
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /**
@@ -606,9 +608,11 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	__mod_lruvec_state(lruvec, idx, val);
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
@@ -630,9 +634,11 @@ static inline void __mod_lruvec_page_state(struct page *page,
 static inline void mod_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx, int val)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	__mod_lruvec_page_state(page, idx, val);
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -659,9 +665,11 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 static inline void count_memcg_events(struct mem_cgroup *memcg,
 				      int idx, unsigned long count)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	__count_memcg_events(memcg, idx, count);
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /* idx can be of type enum memcg_event_item or vm_event_item */
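Why preempt_disable() is not enough here (a sketch of the reasoning): these counters are also updated from interrupt context, and the underlying __mod_*() helpers do a non-atomic read-modify-write. Disabling preemption keeps other tasks away but still lets an interrupt on the same CPU run between the read and the write and lose an update; disabling interrupts closes that window. A schematic of the pattern using the kernel's real local_irq_save()/local_irq_restore() API (the counter is a stand-in, not the actual memcg state):

#include <linux/irqflags.h>

static long pcpu_counter;	/* illustrative stand-in for a stat word */

static void mod_counter(long val)
{
	unsigned long flags;

	local_irq_save(flags);		/* IRQ handlers cannot interleave... */
	pcpu_counter += val;		/* ...with this read-modify-write */
	local_irq_restore(flags);
}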
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
 	atomic_inc(&mm->mm_count);
 }
 
-extern void mmdrop(struct mm_struct *mm);
+extern void __mmdrop(struct mm_struct *mm);
+
+static inline void mmdrop(struct mm_struct *mm)
+{
+	/*
+	 * The implicit full barrier implied by atomic_dec_and_test() is
+	 * required by the membarrier system call before returning to
+	 * user-space, after storing to rq->curr.
+	 */
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+		__mmdrop(mm);
+}
 
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
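The point of re-inlining: in the common case mmdrop() now compiles down to a single inlined atomic_dec_and_test() on hot scheduler paths, and only the final reference falls through to the out-of-line __mmdrop(). A sketch of the caller pattern (illustrative, not from the patch):

#include <linux/sched/mm.h>

static void borrow_mm(struct mm_struct *mm)
{
	mmgrab(mm);	/* pin the mm_struct itself (not the address space) */
	/* ... inspect mm fields ... */
	mmdrop(mm);	/* usually just an inlined atomic decrement */
}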
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void add_page_to_unevictable_list(struct page *page);
-
 extern void lru_cache_add_active_or_unevictable(struct page *page,
 						struct vm_area_struct *vma);
 
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-static void __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
 	put_user_ns(mm->user_ns);
 	free_mm(mm);
 }
-
-void mmdrop(struct mm_struct *mm)
-{
-	/*
-	 * The implicit full barrier implied by atomic_dec_and_test() is
-	 * required by the membarrier system call before returning to
-	 * user-space, after storing to rq->curr.
-	 */
-	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-		__mmdrop(mm);
-}
-EXPORT_SYMBOL_GPL(mmdrop);
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 static void mmdrop_async_fn(struct work_struct *work)
 {
@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
 	struct rchan_buf *buf;
 
-	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
 		return NULL;
 
 	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
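Why KMALLOC_MAX_SIZE rather than UINT_MAX: the count sizes a kmalloc'd array of size_t pointers, and kmalloc() cannot satisfy anything larger than KMALLOC_MAX_SIZE, so the old check only ruled out arithmetic overflow while still letting impossible requests reach the allocator. A hedged sketch of the tightened bound (helper name is made up):

#include <linux/slab.h>

/* illustrative: can a pointer array of n entries be kmalloc'ed at all? */
static inline bool subbuf_count_valid(size_t n_subbufs)
{
	return n_subbufs <= KMALLOC_MAX_SIZE / sizeof(size_t *);
}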
@@ -1642,6 +1642,7 @@ config DMA_API_DEBUG
 
 menuconfig RUNTIME_TESTING_MENU
 	bool "Runtime Testing"
+	def_bool y
 
 if RUNTIME_TESTING_MENU
 
@@ -431,7 +431,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
 			bitmap = this_cpu_xchg(ida_bitmap, NULL);
 			if (!bitmap)
 				return -EAGAIN;
-			memset(bitmap, 0, sizeof(*bitmap));
 			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 			rcu_assign_pointer(*slot, bitmap);
 		}
@@ -464,7 +463,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
 			bitmap = this_cpu_xchg(ida_bitmap, NULL);
 			if (!bitmap)
 				return -EAGAIN;
-			memset(bitmap, 0, sizeof(*bitmap));
 			__set_bit(bit, bitmap->bitmap);
 			radix_tree_iter_replace(root, &iter, slot, bitmap);
 		}
@@ -2125,7 +2125,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 		preempt_enable();
 
 	if (!this_cpu_read(ida_bitmap)) {
-		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
 		if (!bitmap)
 			return 0;
 		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
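The shape of this fix: kzalloc() is kmalloc() with __GFP_ZERO, so the bitmap is zeroed once at allocation and the per-call-site memset()s removed above become redundant; anything parked in the per-cpu ida_bitmap slot is then always clean. A one-line sketch of the equivalence:

#include <linux/slab.h>

/* illustrative: both forms below return zeroed memory */
static void *alloc_zeroed(size_t size, gfp_t gfp)
{
	return kzalloc(size, gfp);	/* == kmalloc(size, gfp | __GFP_ZERO) */
}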
@@ -64,6 +64,12 @@ void clear_page_mlock(struct page *page)
 		mod_zone_page_state(page_zone(page), NR_MLOCK,
 				    -hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGCLEARED);
+		/*
+		 * The previous TestClearPageMlocked() corresponds to the
+		 * smp_mb() in __pagevec_lru_add_fn().
+		 *
+		 * See __pagevec_lru_add_fn for more explanation.
+		 */
 		if (!isolate_lru_page(page)) {
 			putback_lru_page(page);
 		} else {
@@ -46,6 +46,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <xen/xen.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
 	/* Always populate low zones for address-constrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
+	/* Xen PV domains need page structures early */
+	if (xen_pv_domain())
+		return true;
 	(*nr_initialised)++;
 	if ((*nr_initialised > pgdat->static_init_pgcnt) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
mm/swap.c | 84
@@ -445,30 +445,6 @@ void lru_cache_add(struct page *page)
 	__lru_cache_add(page);
 }
 
-/**
- * add_page_to_unevictable_list - add a page to the unevictable list
- * @page:  the page to be added to the unevictable list
- *
- * Add page directly to its zone's unevictable list. To avoid races with
- * tasks that might be making the page evictable, through eg. munlock,
- * munmap or exit, while it's not on the lru, we want to add the page
- * while it's locked or otherwise "invisible" to other tasks. This is
- * difficult to do when using the pagevec cache, so bypass that.
- */
-void add_page_to_unevictable_list(struct page *page)
-{
-	struct pglist_data *pgdat = page_pgdat(page);
-	struct lruvec *lruvec;
-
-	spin_lock_irq(&pgdat->lru_lock);
-	lruvec = mem_cgroup_page_lruvec(page, pgdat);
-	ClearPageActive(page);
-	SetPageUnevictable(page);
-	SetPageLRU(page);
-	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-	spin_unlock_irq(&pgdat->lru_lock);
-}
-
 /**
  * lru_cache_add_active_or_unevictable
  * @page:  the page to be added to LRU
@@ -484,13 +460,9 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		SetPageActive(page);
-		lru_cache_add(page);
-		return;
-	}
-
-	if (!TestSetPageMlocked(page)) {
+	else if (!TestSetPageMlocked(page)) {
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
@@ -500,7 +472,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
-	add_page_to_unevictable_list(page);
+	lru_cache_add(page);
 }
 
 /*
@@ -886,15 +858,55 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 				 void *arg)
 {
-	int file = page_is_file_cache(page);
-	int active = PageActive(page);
-	enum lru_list lru = page_lru(page);
+	enum lru_list lru;
+	int was_unevictable = TestClearPageUnevictable(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	SetPageLRU(page);
+	/*
+	 * Page becomes evictable in two ways:
+	 * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
+	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
+	 *    a) do PageLRU check with lock [check_move_unevictable_pages]
+	 *    b) do PageLRU check before lock [clear_page_mlock]
+	 *
+	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
+	 * following strict ordering:
+	 *
+	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
+	 *
+	 * SetPageLRU()				TestClearPageMlocked()
+	 * smp_mb() // explicit ordering	// above provides strict
+	 *					// ordering
+	 * PageMlocked()			PageLRU()
+	 *
+	 *
+	 * if '#1' does not observe setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that page_evictable
+	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
+	 * can be reordered after PageMlocked check and can make '#1' to fail
+	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
+	 * looking at the same page) and the evictable page will be stranded
+	 * in an unevictable LRU.
+	 */
+	smp_mb();
+
+	if (page_evictable(page)) {
+		lru = page_lru(page);
+		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+					 PageActive(page));
+		if (was_unevictable)
+			count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		lru = LRU_UNEVICTABLE;
+		ClearPageActive(page);
+		SetPageUnevictable(page);
+		if (!was_unevictable)
+			count_vm_event(UNEVICTABLE_PGCULLED);
+	}
+
 	add_page_to_lru_list(page, lruvec, lru);
-	update_page_reclaim_stat(lruvec, file, active);
 	trace_mm_lru_insertion(page, lru);
 }
@@ -913,7 +925,7 @@ EXPORT_SYMBOL(__pagevec_lru_add);
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
- * @nr_pages:	The maximum number of pages
+ * @nr_entries:	The maximum number of pages
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
mm/vmalloc.c | 10
@@ -1943,11 +1943,15 @@ void *vmalloc_exec(unsigned long size)
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
 #else
-#define GFP_VMALLOC32 GFP_KERNEL
+/*
+ * 64b systems should always have either DMA or DMA32 zones. For others
+ * GFP_DMA32 should do the right thing and use the normal zone.
+ */
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #endif
 
 /**
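A note on the added parentheses (a general C point, shown with made-up flag values rather than real GFP bits): a macro that expands to an unparenthesized `a | b` can bind to operators at the use site, because `&` binds tighter than `|`:

#include <stdio.h>

#define GFP_BAD  0x10 | 0x01		/* unparenthesized, like the old macros */
#define GFP_GOOD (0x10 | 0x01)

int main(void)
{
	printf("%#x\n", GFP_BAD & 0x01);	/* 0x11: '&' bound only to 0x01 */
	printf("%#x\n", GFP_GOOD & 0x01);	/* 0x1: the whole value was masked */
	return 0;
}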
mm/vmscan.c | 59
@@ -769,64 +769,7 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  */
 void putback_lru_page(struct page *page)
 {
-	bool is_unevictable;
-	int was_unevictable = PageUnevictable(page);
-
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-redo:
-	ClearPageUnevictable(page);
-
-	if (page_evictable(page)) {
-		/*
-		 * For evictable pages, we can use the cache.
-		 * In event of a race, worst case is we end up with an
-		 * unevictable page on [in]active list.
-		 * We know how to handle that.
-		 */
-		is_unevictable = false;
-		lru_cache_add(page);
-	} else {
-		/*
-		 * Put unevictable pages directly on zone's unevictable
-		 * list.
-		 */
-		is_unevictable = true;
-		add_page_to_unevictable_list(page);
-		/*
-		 * When racing with an mlock or AS_UNEVICTABLE clearing
-		 * (page is unlocked) make sure that if the other thread
-		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_pages,
-		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
-		 * the page back to the evictable list.
-		 *
-		 * The other side is TestClearPageMlocked() or shmem_lock().
-		 */
-		smp_mb();
-	}
-
-	/*
-	 * page's status can change while we move it among lru. If an evictable
-	 * page is on unevictable list, it never be freed. To avoid that,
-	 * check after we added it to the list, again.
-	 */
-	if (is_unevictable && page_evictable(page)) {
-		if (!isolate_lru_page(page)) {
-			put_page(page);
-			goto redo;
-		}
-		/* This means someone else dropped this page from LRU
-		 * So, it will be freed or putback to LRU again. There is
-		 * nothing to do here.
-		 */
-	}
-
-	if (was_unevictable && !is_unevictable)
-		count_vm_event(UNEVICTABLE_PGRESCUED);
-	else if (!was_unevictable && is_unevictable)
-		count_vm_event(UNEVICTABLE_PGCULLED);
-
+	lru_cache_add(page);
 	put_page(page);		/* drop ref from isolate */
 }
@@ -360,7 +360,7 @@ u64 zpool_get_total_size(struct zpool *zpool)
 
 /**
  * zpool_evictable() - Test if zpool is potentially evictable
- * @pool	The zpool to test
+ * @zpool:	The zpool to test
 *
 * Zpool is only potentially evictable when it's created with struct
 * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
@@ -1007,6 +1007,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	u8 *src, *dst;
 	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
 
+	/* THP isn't supported */
+	if (PageTransHuge(page)) {
+		ret = -EINVAL;
+		goto reject;
+	}
+
 	if (!zswap_enabled || !tree) {
 		ret = -ENODEV;
 		goto reject;
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for cgroup tools
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra
 
 all: cgroup_event_listener
@@ -12,8 +12,6 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
 CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
 
 ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for Hyper-V tools
 
-CC = $(CROSS_COMPILE)gcc
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
 
@@ -12,8 +12,6 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
 CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
 
 ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer
@@ -2,7 +2,6 @@
 PREFIX ?= /usr
 SBINDIR ?= sbin
 INSTALL ?= install
-CC = $(CROSS_COMPILE)gcc
 
 TARGET = freefall
 
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for LEDs tools
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -g -I../../include/uapi
 
 all: uledmon led_hw_brightness_mon
@@ -146,12 +146,6 @@ define allow-override
 	$(eval $(1) = $(2)))
 endef
 
-# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-$(call allow-override,LD,$(CROSS_COMPILE)ld)
-$(call allow-override,CXX,$(CROSS_COMPILE)g++)
-
 LD += $(EXTRA_LDFLAGS)
 
 HOSTCC ?= gcc
@@ -56,9 +56,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
 # to compile vs uClibc, that can be done here as well.
 CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
 CROSS_COMPILE ?= $(CROSS)
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)gcc
-STRIP = $(CROSS_COMPILE)strip
 HOSTCC = gcc
 
 # check if compiler option is supported
@@ -42,6 +42,24 @@ EXTRA_WARNINGS += -Wformat
 
 CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+  $(if $(or $(findstring environment,$(origin $(1))),\
+            $(findstring command line,$(origin $(1)))),,\
+    $(eval $(1) = $(2)))
+endef
+
+# Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
+$(call allow-override,CXX,$(CROSS_COMPILE)g++)
+$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+
 ifeq ($(CC_NO_CLANG), 1)
 EXTRA_WARNINGS += -Wstrict-aliasing=3
 endif
@@ -11,8 +11,6 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
 CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
 
 ALL_TARGETS := spidev_test spidev_fdx
@@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_tests.sh
+TEST_FILES := run_fuse_test.sh
 TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
 
 fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for USB tools
 
-CC = $(CROSS_COMPILE)gcc
 PTHREAD_LIBS = -lpthread
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g -I../include
@@ -6,7 +6,6 @@ TARGETS=page-types slabinfo page_owner_sort
 LIB_DIR = ../lib/api
 LIBS = $(LIB_DIR)/libapi.a
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -I../lib/
 LDFLAGS = $(LIBS)
 
@@ -2,7 +2,6 @@ PREFIX ?= /usr
 SBINDIR ?= sbin
 INSTALL ?= install
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
-CC = $(CROSS_COMPILE)gcc
 
 TARGET = dell-smbios-example
 