Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  latencytop: change /proc task_struct access method
  latencytop: fix memory leak on latency proc file
  latencytop: fix kernel panic while reading latency proc file
  sched: add declaration of sched_tail to sched.h
  sched: fix signedness warnings in sched.c
  sched: clean up __pick_last_entity() a bit
  sched: remove duplicate code from sched_fair.c
  sched: make early bootup sched_clock() use safer
commit 37c00b84d0
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -128,8 +128,6 @@ void *get_current(void)
 	return current;
 }
 
-extern void schedule_tail(struct task_struct *prev);
-
 /*
  * This is called magically, by its address being stuffed in a jmp_buf
  * and being longjmp-d to.
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -314,9 +314,12 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 static int lstats_show_proc(struct seq_file *m, void *v)
 {
 	int i;
-	struct task_struct *task = m->private;
-	seq_puts(m, "Latency Top version : v0.1\n");
+	struct inode *inode = m->private;
+	struct task_struct *task = get_proc_task(inode);
 
+	if (!task)
+		return -ESRCH;
+	seq_puts(m, "Latency Top version : v0.1\n");
 	for (i = 0; i < 32; i++) {
 		if (task->latency_record[i].backtrace[0]) {
 			int q;
@@ -341,32 +344,24 @@ static int lstats_show_proc(struct seq_file *m, void *v)
 		}
 
 	}
+	put_task_struct(task);
 	return 0;
 }
 
 static int lstats_open(struct inode *inode, struct file *file)
 {
-	int ret;
-	struct seq_file *m;
-	struct task_struct *task = get_proc_task(inode);
-
-	ret = single_open(file, lstats_show_proc, NULL);
-	if (!ret) {
-		m = file->private_data;
-		m->private = task;
-	}
-	return ret;
+	return single_open(file, lstats_show_proc, inode);
 }
 
 static ssize_t lstats_write(struct file *file, const char __user *buf,
 			    size_t count, loff_t *offs)
 {
-	struct seq_file *m;
-	struct task_struct *task;
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 
-	m = file->private_data;
-	task = m->private;
+	if (!task)
+		return -ESRCH;
 	clear_all_latency_tracing(task);
+	put_task_struct(task);
 
 	return count;
 }
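Taken together, the three latencytop fixes move the task lookup from open() time to each individual read and write: the seq_file's private pointer now carries the inode rather than a long-lived task reference, so the reference can no longer be leaked across open/close, and a reader can no longer dereference a task that exited after open(). A sketch of the resulting handlers, reassembled from the hunks above (the body of the per-record printing loop is elided):

static int lstats_show_proc(struct seq_file *m, void *v)
{
	int i;
	struct inode *inode = m->private;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;
	seq_puts(m, "Latency Top version : v0.1\n");
	for (i = 0; i < 32; i++) {
		if (task->latency_record[i].backtrace[0]) {
			/* ... print backtrace and latency stats ... */
		}
	}
	put_task_struct(task);
	return 0;
}

static int lstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, lstats_show_proc, inode);
}

get_proc_task() either takes a reference on the task or returns NULL if it is gone, and every successful lookup is paired with a put_task_struct(); lstats_write() follows the same get/put pattern via file->f_dentry->d_inode.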
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -242,6 +242,7 @@ struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
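This declaration pairs with the first hunk: arch code that starts a freshly forked task (the UML file above being one such caller) can now pull the prototype from <linux/sched.h> instead of re-declaring it locally. A hedged sketch of the typical call site, with a hypothetical arch helper name:

#include <linux/sched.h>

/* Hypothetical arch fork tail: the first code a newly created task runs.
 * schedule_tail() finishes the context switch begun on this task's
 * behalf (post-switch bookkeeping for 'prev') before the new task
 * proceeds to its own entry point. */
static void arch_fork_tail(struct task_struct *prev)
{
	schedule_tail(prev);	/* prototype now comes from sched.h */
	/* ... continue into the new task's thread function ... */
}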
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
@@ -3885,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
-	long *switch_count;
+	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
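The switch_count change is a plain signedness fix: the pointer aliases one of the task's context-switch counters, and those counters are unsigned. An illustrative excerpt (field types as in the task_struct of this era, not the full struct):

/* schedule() points switch_count at one of these and increments it,
 * so the pointee type must match their unsignedness: */
struct task_struct {
	/* ... */
	unsigned long nvcsw;	/* voluntary context switches */
	unsigned long nivcsw;	/* involuntary context switches */
	/* ... */
};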
@@ -7284,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
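The scheduler_running pieces above (the __read_mostly flag, the early-return in cpu_clock(), and the assignment at the end of sched_init()) are one change: cpu_clock() must not touch a runqueue before sched_init() has set it up. Reassembled, the post-merge cpu_clock() reads roughly as follows; the declaration of 'now' and the final return sit just outside the quoted hunk, so those two lines are an assumption here:

unsigned long long cpu_clock(int cpu)
{
	unsigned long long now;	/* assumed declaration, outside the hunk */
	unsigned long flags;
	struct rq *rq;

	/*
	 * Only call sched_clock() if the scheduler has already been
	 * initialized (some code might call cpu_clock() very early):
	 */
	if (unlikely(!scheduler_running))
		return 0;

	local_irq_save(flags);
	rq = cpu_rq(cpu);
	update_rq_clock(rq);
	now = rq->clock;
	local_irq_restore(flags);

	return now;	/* assumed, outside the hunk */
}

Very early callers simply get 0 back; once sched_init() flips scheduler_running, the real clock path is taken.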
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -202,17 +202,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 
 static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-	struct sched_entity *se = NULL;
-	struct rb_node *parent;
+	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
-	while (*link) {
-		parent = *link;
-		se = rb_entry(parent, struct sched_entity, run_node);
-		link = &parent->rb_right;
-	}
+	if (!last)
+		return NULL;
 
-	return se;
+	return rb_entry(last, struct sched_entity, run_node);
 }
 
 /**************************************************************
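The cleanup is behavior-preserving: the rightmost node of the CFS timeline is the entity with the largest vruntime, and rb_last() performs the same follow-rb_right-until-NULL descent the removed while loop open-coded. For reference, lib/rbtree.c's rb_last() of this era is essentially:

/* Descend right links until there is no right child;
 * that node is the rightmost in the tree. */
struct rb_node *rb_last(struct rb_root *root)
{
	struct rb_node *node = root->rb_node;

	if (!node)
		return NULL;
	while (node->rb_right)
		node = node->rb_right;
	return node;
}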