[PATCH] sched: add cacheflush() asm
Add per-arch sched_cacheflush() which is a write-back cacheflush used by
the migration-cost calibration code at bootup time.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 4dc7a0bbeb (parent c6b44d10f2)
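For context, the scheduler's migration-cost calibration wants each timing sample to start cache-cold, which is what sched_cacheflush() provides on each arch. The sketch below is not part of the patch; it is a minimal, hypothetical userspace analogue rather than kernel code: instead of wbinvd() or an SAL call it evicts the cache by streaming through a large scratch buffer, then times a cold re-touch of a working set, which is roughly the quantity the bootup calibration measures. The buffer sizes, the fake_cacheflush() helper and the timing code are illustrative assumptions.

```c
/*
 * Hypothetical userspace analogue of the migration-cost sampling idea.
 * Build: cc -O2 demo.c -o demo
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define WORKING_SET	(512 * 1024)		/* the "task's" data */
#define EVICT_SIZE	(64 * 1024 * 1024)	/* large enough to evict most caches */

static double now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1e9 + ts.tv_nsec;
}

/*
 * Stand-in for sched_cacheflush(): push the working set out of cache
 * by streaming through a buffer much larger than the last-level cache.
 */
static void fake_cacheflush(volatile unsigned char *evict)
{
	for (size_t i = 0; i < EVICT_SIZE; i += 64)
		evict[i]++;
}

int main(void)
{
	unsigned char *ws = malloc(WORKING_SET);
	volatile unsigned char *evict = malloc(EVICT_SIZE);
	volatile unsigned long sum = 0;
	double t0, t1;

	memset(ws, 1, WORKING_SET);

	fake_cacheflush(evict);		/* start the sample cache-cold */

	t0 = now_ns();
	for (size_t i = 0; i < WORKING_SET; i += 64)
		sum += ws[i];		/* cost of re-populating the cache */
	t1 = now_ns();

	printf("cold re-touch of %d KB took %.0f ns (sum=%lu)\n",
	       WORKING_SET / 1024, t1 - t0, sum);

	free(ws);
	free((void *)evict);
	return 0;
}
```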
@@ -60,6 +60,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
+#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
@@ -870,6 +871,15 @@ cpu_init (void)
 	pm_idle = default_idle;
 }
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+	ia64_sal_cache_flush(3);
+}
+
 void
 check_bugs (void)
 {
@@ -140,6 +140,16 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
@@ -171,6 +171,16 @@ do { \
 	last = __switch_to(prev,prev->thread_info,next->thread_info); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /*
  * CPU interrupt mask handling.
  */
@@ -114,6 +114,16 @@ do { \
 	last = __switch_to(prev,prev->thread_info,next->thread_info); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /*
  * Save the current interrupt enable state & disable IRQs
  */
@@ -548,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
@@ -68,6 +68,16 @@
 	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
@@ -164,6 +164,16 @@ do { \
 	__restore_dsp(current); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
@@ -175,6 +175,16 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
@@ -123,6 +123,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
 	struct task_struct *);
 #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
@@ -57,6 +57,16 @@
 	last = __last; \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
@@ -165,6 +165,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	"o0", "o1", "o2", "o3", "o7"); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /*
  * Changing the IRQ level on the Sparc.
  */
@@ -253,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	} \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")