sh: Always provide thread_info allocators.
Presently the thread_info allocators are special-cased, depending on
THREAD_SHIFT < PAGE_SHIFT. This provides a sensible definition for them
regardless of configuration, in preparation for extended CPU state.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit cbf6b1ba7a
parent 70e068eef9
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -93,14 +93,12 @@ static inline struct thread_info *current_thread_info(void)
 
 #define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
 
-#else /* THREAD_SHIFT < PAGE_SHIFT */
-
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+#endif
 
 extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
 extern void free_thread_info(struct thread_info *ti);
 
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
 #endif /* __ASSEMBLY__ */
 
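With __HAVE_ARCH_THREAD_INFO_ALLOCATOR now defined unconditionally, the generic fork path always calls the architecture's alloc_thread_info()/free_thread_info() rather than its own page-based fallback. For context, that suppressed fallback in kernel/fork.c of this era looks roughly like the sketch below (treat the exact wording as approximate); the #else branch of the new arch/sh/kernel/process.c further down mirrors it, which is what keeps THREAD_SHIFT >= PAGE_SHIFT configurations behaving as before:

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* Generic fallback: hand out THREAD_SIZE_ORDER worth of pages per thread. */
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

On sh configurations where THREAD_SHIFT < PAGE_SHIFT, that page-order allocation would waste most of a page per thread, hence the slab-backed variant the architecture supplies instead.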
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -13,8 +13,8 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 obj-y	:= debugtraps.o dma-nommu.o dumpstack.o			\
 	   idle.o io.o io_generic.o irq.o			\
-	   irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o \
-	   ptrace_$(BITS).o return_address.o			\
+	   irq_$(BITS).o machvec.o nmi_debug.o process.o	\
+	   process_$(BITS).o ptrace_$(BITS).o return_address.o	\
 	   setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o	\
 	   syscalls_$(BITS).o time.o topology.o traps.o		\
 	   traps_$(BITS).o unwinder.o
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,47 @@
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+	struct thread_info *ti;
+
+	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	if (unlikely(ti == NULL))
+		return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	memset(ti, 0, THREAD_SIZE);
+#endif
+	return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+#else
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+	gfp_t mask = GFP_KERNEL;
+#endif
+	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
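One ordering constraint worth noting: on THREAD_SHIFT < PAGE_SHIFT configurations the slab cache must exist before the first thread_info is handed out, so thread_info_cache_init() has to run early in boot, ahead of the first fork. A minimal sketch of that ordering, with the hypothetical early_boot_sketch() standing in for the generic start-up code that actually invokes the hook:

/* Illustrative only: early_boot_sketch() is not a real kernel function. */
void __init early_boot_sketch(void)
{
	/* Creates the "thread_info" kmem_cache when THREAD_SHIFT < PAGE_SHIFT;
	 * on other configurations there is nothing for the hook to set up. */
	thread_info_cache_init();

	/* ...fork_init() and the first kernel threads may now allocate
	 * thread_info structures via alloc_thread_info()... */
}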
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -283,35 +283,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#if THREAD_SHIFT < PAGE_SHIFT
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
-{
-	struct thread_info *ti;
-
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
-	if (unlikely(ti == NULL))
-		return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
-#endif
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
-}
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {