mm: NUMA aware alloc_thread_info_node()
Add a node parameter to alloc_thread_info(), and change its name to
alloc_thread_info_node().

This change is needed to allow NUMA aware kthread_create_on_cpu().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: David Howells <dhowells@redhat.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b6a84016bd
parent 504f52b543
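For orientation, a minimal sketch of how a caller can use the renamed helper; this example is not part of the patch. The example_alloc_ti() wrapper and the choice of numa_node_id() to pick the node are assumptions for illustration only; the patch itself changes nothing but allocator names, signatures, and backing *_node allocators.

	#include <linux/gfp.h>
	#include <linux/sched.h>
	#include <linux/topology.h>

	/* Hypothetical caller: keep a task's kernel stack on a chosen NUMA node. */
	static struct thread_info *example_alloc_ti(struct task_struct *tsk)
	{
		int node = numa_node_id();	/* assumed policy: the current CPU's node */

		/* After this patch, the stack memory comes from the requested node. */
		return alloc_thread_info_node(tsk, node);
	}

On !CONFIG_NUMA builds the *_node allocator variants ignore the node argument, so architectures without NUMA pay nothing for the extra parameter.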
@@ -68,7 +68,7 @@ struct thread_info {
 #define init_thread_info	(init_thread_union.thread_info)
 
 /* thread information allocation */
-#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define alloc_thread_info_node(tsk, node) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 
 #endif /* !__ASSEMBLY__ */
@@ -84,16 +84,11 @@ register struct thread_info *__current_thread_info asm("gr15");
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk)				\
-	({						\
-		struct thread_info *ret;		\
-							\
-		ret = kzalloc(THREAD_SIZE, GFP_KERNEL);	\
-							\
-		ret;					\
-	})
+#define alloc_thread_info_node(tsk, node)		\
+	kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #else
-#define alloc_thread_info(tsk)	kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)		\
+	kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #endif
 
 #define free_thread_info(info)	kfree(info)
@@ -59,11 +59,12 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_info_node(tsk, node)	\
+		((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()	((struct thread_info *) 0)
-#define alloc_thread_info(tsk)	((struct thread_info *) 0)
+#define alloc_thread_info_node(tsk, node)	((struct thread_info *) 0)
 #define task_thread_info(tsk)	((struct thread_info *) 0)
 #endif
 #define free_thread_info(ti)	/* nothing */
@@ -96,16 +96,11 @@ static inline struct thread_info *current_thread_info(void)
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk)				\
-	({						\
-		struct thread_info *ret;		\
-							\
-		ret = kzalloc(THREAD_SIZE, GFP_KERNEL);	\
-							\
-		ret;					\
-	})
+#define alloc_thread_info_node(tsk, node)		\
+	kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #else
-#define alloc_thread_info(tsk)	kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)		\
+	kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #endif
 
 #define free_thread_info(info) kfree(info)
@@ -88,9 +88,11 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)		\
+	kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #else
-#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)		\
+	kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #endif
 
 #define free_thread_info(info) kfree(info)
@@ -124,9 +124,11 @@ static inline unsigned long current_stack_pointer(void)
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)		\
+	kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #else
-#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)		\
+	kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #endif
 
 #define free_thread_info(ti)	kfree((ti))
@@ -72,7 +72,7 @@ struct thread_info {
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
+extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
 extern void free_thread_info(struct thread_info *ti);
 
 #endif /* THREAD_SHIFT < PAGE_SHIFT */
@@ -1218,11 +1218,11 @@ void __ppc64_runlatch_off(void)
 
 static struct kmem_cache *thread_info_cache;
 
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
 {
 	struct thread_info *ti;
 
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
 	if (unlikely(ti == NULL))
 		return NULL;
 #ifdef CONFIG_DEBUG_STACK_USAGE
@@ -71,7 +71,7 @@ struct thread_info {
 register struct thread_info *__current_thread_info __asm__("r28");
 #define current_thread_info()	__current_thread_info
 
-#define alloc_thread_info(tsk)	kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info_node(tsk, node)	kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #define free_thread_info(info)	kfree(info)
 
 #endif /* !__ASSEMBLY__ */
@@ -95,7 +95,7 @@ static inline struct thread_info *current_thread_info(void)
 
 #endif
 
-extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
+extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
 extern void free_thread_info(struct thread_info *ti);
 extern void arch_task_cache_init(void);
 #define arch_task_cache_init arch_task_cache_init
@@ -32,16 +32,16 @@ void free_thread_xstate(struct task_struct *tsk)
 #if THREAD_SHIFT < PAGE_SHIFT
 static struct kmem_cache *thread_info_cache;
 
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
 {
 	struct thread_info *ti;
 
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
-	if (unlikely(ti == NULL))
-		return NULL;
 #ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
+	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+	gfp_t mask = GFP_KERNEL;
 #endif
 
+	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
 	return ti;
 }
@@ -64,7 +64,9 @@ struct thread_info *alloc_thread_info(struct task_struct *tsk)
 #else
 	gfp_t mask = GFP_KERNEL;
 #endif
-	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+
+	return page ? page_address(page) : NULL;
 }
 
 void free_thread_info(struct thread_info *ti)
@@ -82,8 +82,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
-#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
+BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info_node, int)
+#define alloc_thread_info_node(tsk, node) BTFIXUP_CALL(alloc_thread_info_node)(node)
 
 BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
@@ -92,7 +92,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 
 /*
  * Size of kernel stack for each process.
- * Observe the order of get_free_pages() in alloc_thread_info().
+ * Observe the order of get_free_pages() in alloc_thread_info_node().
  * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
  */
 #define THREAD_SIZE	8192
@@ -146,21 +146,21 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk)					\
-({								\
-	struct thread_info *ret;				\
-								\
-	ret = (struct thread_info *)				\
-	  __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER);	\
-	if (ret)						\
-		memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER);	\
-	ret;							\
-})
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
 #else
-#define alloc_thread_info(tsk) \
-	((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER))
+#define THREAD_FLAGS (GFP_KERNEL)
 #endif
 
+#define alloc_thread_info_node(tsk, node)			\
+({								\
+	struct page *page = alloc_pages_node(node, THREAD_FLAGS,	\
+					     __THREAD_INFO_ORDER);	\
+	struct thread_info *ret;				\
+								\
+	ret = page ? page_address(page) : NULL;			\
+	ret;							\
+})
+
 #define free_thread_info(ti) \
	free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
 
@@ -650,7 +650,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
  * mappings on the kernel stack without any special code as we did
  * need on the sun4c.
  */
-static struct thread_info *srmmu_alloc_thread_info(void)
+static struct thread_info *srmmu_alloc_thread_info_node(int node)
 {
 	struct thread_info *ret;
 
@@ -2271,7 +2271,7 @@ void __init ld_mmu_srmmu(void)
 
 	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
@@ -922,7 +922,7 @@ static inline void garbage_collect(int entry)
 		free_locked_segment(BUCKET_ADDR(entry));
 }
 
-static struct thread_info *sun4c_alloc_thread_info(void)
+static struct thread_info *sun4c_alloc_thread_info_node(int node)
 {
 	unsigned long addr, pages;
 	int entry;
@@ -2155,7 +2155,7 @@ void __init ld_mmu_sun4c(void)
 	BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(alloc_thread_info_node, sun4c_alloc_thread_info_node, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
@@ -84,7 +84,7 @@ register unsigned long stack_pointer __asm__("sp");
 	((struct thread_info *)(stack_pointer & -THREAD_SIZE))
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-extern struct thread_info *alloc_thread_info(struct task_struct *task);
+extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node);
 extern void free_thread_info(struct thread_info *info);
 
 /* Sit on a nap instruction until interrupted. */
@@ -109,7 +109,7 @@ void cpu_idle(void)
 	}
 }
 
-struct thread_info *alloc_thread_info(struct task_struct *task)
+struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
 {
 	struct page *page;
 	gfp_t flags = GFP_KERNEL;
@@ -118,7 +118,7 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
 	flags |= __GFP_ZERO;
 #endif
 
-	page = alloc_pages(flags, THREAD_SIZE_ORDER);
+	page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
 	if (!page)
 		return NULL;
 
@@ -161,8 +161,14 @@ struct thread_info {
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-#define alloc_thread_info(tsk)					\
-	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+#define alloc_thread_info_node(tsk, node)			\
+({								\
+	struct page *page = alloc_pages_node(node, THREAD_FLAGS,	\
+					     THREAD_ORDER);	\
+	struct thread_info *ret = page ? page_address(page) : NULL;	\
+								\
+	ret;							\
+})
 
 #ifdef CONFIG_X86_32
 
@@ -117,14 +117,17 @@ static struct kmem_cache *task_struct_cachep;
 #endif
 
 #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+						  int node)
 {
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
 #else
 	gfp_t mask = GFP_KERNEL;
 #endif
-	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+
+	return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
@@ -260,7 +263,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	if (!tsk)
 		return NULL;
 
-	ti = alloc_thread_info(tsk);
+	ti = alloc_thread_info_node(tsk, node);
 	if (!ti) {
 		free_task_struct(tsk);
 		return NULL;