#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>
/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */
/*
 * Per-CPU accumulated CPU time, broken down by usage category.
 * Counters only ever increase; readers (e.g. the /proc/stat code)
 * report them as "where has this CPU's time gone since boot".
 */
struct cpu_usage_stat {
	cputime64_t user;	/* user-mode time */
	cputime64_t nice;	/* user-mode time at lowered (nice) priority */
	cputime64_t system;	/* kernel-mode time */
	cputime64_t softirq;	/* time spent servicing softirqs */
	cputime64_t irq;	/* time spent servicing hard interrupts */
	cputime64_t idle;	/* idle time */
	cputime64_t iowait;	/* idle time while I/O was outstanding */
	cputime64_t steal;	/* time stolen by the hypervisor */
	cputime64_t guest;	/* time spent running a guest */
	cputime64_t guest_nice;	/* time spent running a niced guest */
};
/*
 * Aggregate per-CPU statistics: CPU-time breakdown plus interrupt
 * counters.  One instance exists per CPU (see DECLARE_PER_CPU below).
 */
struct kernel_stat {
	struct cpu_usage_stat cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
	/*
	 * Per-IRQ counts are kept here only when the generic hardirq
	 * layer is not used; with genirq they live in the irq descriptor
	 * instead (see the #else branch further down this file).
	 */
	unsigned int irqs[NR_IRQS];
#endif
	/* total hard interrupts handled on this CPU since boot */
	unsigned long irqs_sum;
	/* per-softirq-type counts for this CPU */
	unsigned int softirqs[NR_SOFTIRQS];
};
/* One struct kernel_stat per CPU. */
DECLARE_PER_CPU(struct kernel_stat, kstat);

/* Statistics of a given CPU. */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu __get_cpu_var(kstat)

/* Total context switches across all CPUs since boot. */
extern unsigned long long nr_context_switches(void);
#ifndef CONFIG_GENERIC_HARDIRQS
|
2008-12-06 10:58:31 +08:00
|
|
|
/*
 * Count of @irq on the current CPU, read from the per-cpu kstat table.
 * Fix: the original expansion was missing its closing parenthesis
 * ("(this_cpu_read(kstat.irqs[irq])"), so any expression using this
 * macro failed to compile.  Both parens are now balanced.
 */
#define kstat_irqs_this_cpu(irq) \
	(this_cpu_read(kstat.irqs[irq]))
struct irq_desc;
|
2008-08-26 03:41:19 +08:00
|
|
|
|
2008-10-15 21:27:23 +08:00
|
|
|
static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
|
|
|
|
struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
kstat_this_cpu.irqs[irq]++;
|
2010-10-28 06:34:13 +08:00
|
|
|
kstat_this_cpu.irqs_sum++;
|
2008-10-15 21:27:23 +08:00
|
|
|
}
|
2008-08-26 03:41:19 +08:00
|
|
|
|
2008-08-20 11:50:09 +08:00
|
|
|
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
|
|
|
|
{
|
|
|
|
return kstat_cpu(cpu).irqs[irq];
|
|
|
|
}
|
2008-12-06 10:58:31 +08:00
|
|
|
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
/*
 * With CONFIG_GENERIC_HARDIRQS the per-IRQ counters live in the irq
 * descriptor (DESC->kstat_irqs[]) rather than in struct kernel_stat,
 * so these variants index the descriptor by the current CPU.
 */
#define kstat_irqs_this_cpu(DESC) \
	((DESC)->kstat_irqs[smp_processor_id()])
/* Bump the descriptor's per-CPU count and the CPU-wide total. */
#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
	((DESC)->kstat_irqs[smp_processor_id()]++);\
	kstat_this_cpu.irqs_sum++; } while (0)

#endif
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
|
|
|
|
{
|
|
|
|
kstat_this_cpu.softirqs[irq]++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
|
|
|
|
{
|
|
|
|
return kstat_cpu(cpu).softirqs[irq];
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Number of interrupts per specific IRQ source, since bootup
|
|
|
|
*/
|
2010-10-28 06:34:15 +08:00
|
|
|
#ifndef CONFIG_GENERIC_HARDIRQS
|
2008-08-20 11:50:09 +08:00
|
|
|
static inline unsigned int kstat_irqs(unsigned int irq)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-08-20 11:50:09 +08:00
|
|
|
unsigned int sum = 0;
|
|
|
|
int cpu;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-28 17:56:37 +08:00
|
|
|
for_each_possible_cpu(cpu)
|
2008-08-20 11:50:09 +08:00
|
|
|
sum += kstat_irqs_cpu(irq, cpu);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return sum;
|
|
|
|
}
|
2010-10-28 06:34:15 +08:00
|
|
|
#else
|
|
|
|
extern unsigned int kstat_irqs(unsigned int irq);
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-10-28 06:34:13 +08:00
|
|
|
/*
|
|
|
|
* Number of interrupts per cpu, since bootup
|
|
|
|
*/
|
|
|
|
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
|
|
|
|
{
|
|
|
|
return kstat_cpu(cpu).irqs_sum;
|
|
|
|
}
|
2008-12-17 21:10:57 +08:00
|
|
|
|
|
|
|
/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

/* Charge cputime to a task's user-mode time. */
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
/*
 * Charge cputime to a task's system time.
 * NOTE(review): the int argument presumably selects the execution
 * context (hardirq/softirq/system) — confirm against the definition.
 */
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
/* Charge time stolen by the hypervisor. */
extern void account_steal_time(cputime_t);
/* Charge idle time. */
extern void account_idle_time(cputime_t);

/* Per-tick accounting entry point; @user is nonzero for a user-mode tick. */
extern void account_process_tick(struct task_struct *, int user);
/* Batch variants for accounting many ticks at once. */
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);
#endif /* _LINUX_KERNEL_STAT_H */