Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  [PATCH] sched: implement cpu_clock(cpu) high-speed time source
  [PATCH] sched: fix the all pinned logic in load_balance_newidle()
  [PATCH] sched: fix newly idle load balance in case of SMT
  [PATCH] sched: sched_cacheflush is now unused
commit ff86303e30
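As an illustrative sketch only (not part of this merge): a caller that wants the fast, loosely synchronized timestamp this merge introduces would use the cpu_clock() declaration added to <linux/sched.h>; the helper name local_clock_ns() below is hypothetical.

#include <linux/sched.h>	/* cpu_clock(), added by this merge */
#include <linux/smp.h>		/* raw_smp_processor_id() */

/*
 * Hypothetical helper, for illustration only: a nanosecond timestamp
 * for the current CPU.  cpu_clock() is cheap but only loosely
 * synchronized across CPUs ("slightly incorrect" per the comment the
 * merge adds), so it suits tracing and statistics rather than timekeeping.
 */
static inline unsigned long long local_clock_ns(void)
{
	return cpu_clock(raw_smp_processor_id());
}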
@@ -980,15 +980,6 @@ cpu_init (void)
 	pm_idle = default_idle;
 }
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-void sched_cacheflush(void)
-{
-	ia64_sal_cache_flush(3);
-}
-
 void __init
 check_bugs (void)
 {
@@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
@@ -254,16 +254,6 @@ do { \
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -109,16 +109,6 @@ do { \
 	last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /*
  * Save the current interrupt enable state & disable IRQs
  */
@@ -310,15 +310,6 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
@@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
-void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
@@ -54,16 +54,6 @@
 	); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
@@ -71,16 +71,6 @@ do { \
 	write_c0_userlocal(task_thread_info(current)->tp_value);\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
@@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-
 /* interrupt control */
 #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
 #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
@@ -184,16 +184,6 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
@@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *,
 	struct task_struct *);
 #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
@@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next); \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
@@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	last = __last; \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_CPU_SH4A
 #define __icbi() \
 { \
@@ -164,16 +164,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	"o0", "o1", "o2", "o3", "o7"); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /*
  * Changing the IRQ level on the Sparc.
  */
@@ -204,16 +204,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	} \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
@@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 #endif	/* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p) cpu_rq(task_cpu(p))
 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		rq = cpu_rq(i);
 
-		if (*sd_idle && !idle_cpu(i))
+		if (*sd_idle && rq->nr_running)
 			*sd_idle = 0;
 
 		/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	int all_pinned = 0;
 	cpumask_t cpus = CPU_MASK_ALL;
 
 	/*
@@ -2715,10 +2735,11 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, CPU_NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE,
+					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (!nr_moved) {
+		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), cpus);
 			if (!cpus_empty(cpus))
 				goto redo;