mm: Extend gfp masking to the page allocator
The page allocator also needs the masking of gfp flags during boot, so this moves it out of slab/slub and uses it with the page allocator as well.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit dcce284a25
parent 9729a6eb58
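For readers following along, here is a minimal userspace C sketch of the pattern this commit centralizes. The flag values are simplified stand-ins for the real definitions in include/linux/gfp.h, and alloc_with_mask() is a hypothetical stand-in for the masking step that __alloc_pages_nodemask() and the slab hot paths now share; it illustrates the technique and is not kernel code.

/*
 * Minimal userspace sketch of boot-time gfp masking.
 * Flag values are simplified stand-ins; alloc_with_mask() is a
 * hypothetical stand-in for the check in __alloc_pages_nodemask().
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT      0x10u   /* can sleep waiting for memory */
#define __GFP_IO        0x40u   /* can start physical I/O */
#define __GFP_FS        0x80u   /* can call down to the FS layer */
#define __GFP_BITS_MASK 0xffu   /* every valid flag bit (simplified) */

/* Everything except the flags that are unsafe before IRQs are enabled */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

/* Starts restricted, like gfp_allowed_mask in mm/page_alloc.c */
static gfp_t gfp_allowed_mask = GFP_BOOT_MASK;

static gfp_t alloc_with_mask(gfp_t gfp_mask)
{
        /* The single masking line the allocator hot paths now share */
        gfp_mask &= gfp_allowed_mask;
        return gfp_mask;
}

int main(void)
{
        const gfp_t gfp_kernel = __GFP_WAIT | __GFP_IO | __GFP_FS;

        /* Early boot: a GFP_KERNEL-style request is silently demoted */
        printf("early boot: %#x -> %#x\n", gfp_kernel,
               alloc_with_mask(gfp_kernel));

        /* start_kernel() lifts the restriction once IRQs are on */
        gfp_allowed_mask = __GFP_BITS_MASK;
        printf("after boot: %#x -> %#x\n", gfp_kernel,
               alloc_with_mask(gfp_kernel));
        return 0;
}

Note the ordering the patch sets up in start_kernel(): the mask is lifted only after local_irq_enable() and before kmem_cache_init_late(), so the late slab setup already runs with the full mask while every earlier allocation is implicitly stripped of its sleeping and I/O flags.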
include/linux/gfp.h

@@ -99,7 +99,7 @@ struct vm_area_struct;
                        __GFP_NORETRY|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -348,4 +348,11 @@ static inline void oom_killer_enable(void)
        oom_killer_disabled = false;
 }
 
+extern gfp_t gfp_allowed_mask;
+
+static inline void set_gfp_allowed_mask(gfp_t mask)
+{
+       gfp_allowed_mask = mask;
+}
+
 #endif /* __LINUX_GFP_H */
init/main.c

@@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void)
                                 "enabled early\n");
        early_boot_irqs_on();
        local_irq_enable();
+
+       /* Interrupts are enabled now so all GFP allocations are safe. */
+       set_gfp_allowed_mask(__GFP_BITS_MASK);
+
        kmem_cache_init_late();
 
        /*
mm/page_alloc.c

@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        struct page *page;
        int migratetype = allocflags_to_migratetype(gfp_mask);
 
+       gfp_mask &= gfp_allowed_mask;
+
        lockdep_trace_alloc(gfp_mask);
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
mm/slab.c
@@ -304,12 +304,6 @@ struct kmem_list3 {
        int free_touched;               /* updated without locking */
 };
 
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
 /*
  * Need this for bootstrapping a per node allocator.
  */
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
 {
        struct kmem_cache *cachep;
 
-       /*
-        * Interrupts are enabled now so all GFP allocations are safe.
-        */
-       slab_gfp_mask = __GFP_BITS_MASK;
-
        /* 6) resize the head arrays to their final sizes */
        mutex_lock(&cache_chain_mutex);
        list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        unsigned long save_flags;
        void *ptr;
 
-       flags &= slab_gfp_mask;
+       flags &= gfp_allowed_mask;
 
        lockdep_trace_alloc(flags);
 
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        unsigned long save_flags;
        void *objp;
 
-       flags &= slab_gfp_mask;
+       flags &= gfp_allowed_mask;
 
        lockdep_trace_alloc(flags);
 
mm/slub.c
@@ -179,12 +179,6 @@ static enum {
        SYSFS           /* Sysfs up */
 } slab_state = DOWN;
 
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        unsigned long flags;
        unsigned int objsize;
 
-       gfpflags &= slab_gfp_mask;
+       gfpflags &= gfp_allowed_mask;
 
        lockdep_trace_alloc(gfpflags);
        might_sleep_if(gfpflags & __GFP_WAIT);
@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void)
 
 void __init kmem_cache_init_late(void)
 {
-       /*
-        * Interrupts are enabled now so all GFP allocations are safe.
-        */
-       slab_gfp_mask = __GFP_BITS_MASK;
 }
 
 /*