Merge branch 'sched/core-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into sched/core
commit 1ecc818c51
@@ -409,7 +409,7 @@ cond_resched(); /* Will sleep */
 <para>
 You should always compile your kernel with
-<symbol>CONFIG_DEBUG_SPINLOCK_SLEEP</symbol> on, and it will warn
+<symbol>CONFIG_DEBUG_ATOMIC_SLEEP</symbol> on, and it will warn
 you if you break these rules.  If you <emphasis>do</emphasis> break
 the rules, you will eventually lock up your box.
 </para>

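As a reminder of what this check catches, here is a minimal sketch of a sleep-in-atomic bug (hypothetical driver code, not part of this patch); with CONFIG_DEBUG_ATOMIC_SLEEP enabled, the msleep() call fires the might_sleep() warning:

    #include <linux/spinlock.h>
    #include <linux/delay.h>

    static DEFINE_SPINLOCK(dev_lock);

    static void broken_update(void)
    {
            spin_lock(&dev_lock);   /* enter atomic context */
            msleep(10);             /* sleeping while atomic: triggers the splat */
            spin_unlock(&dev_lock);
    }
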
@@ -53,7 +53,7 @@ kernel patches.
 
 12: Has been tested with CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
     CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
-    CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_SPINLOCK_SLEEP all simultaneously
+    CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_ATOMIC_SLEEP all simultaneously
     enabled.
 
 13: Has been build- and runtime tested with and without CONFIG_SMP and

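For reference, item 12 after this rename corresponds to a .config fragment like the following (illustrative; layered on top of your usual config):

    CONFIG_PREEMPT=y
    CONFIG_DEBUG_PREEMPT=y
    CONFIG_DEBUG_SLAB=y
    CONFIG_DEBUG_PAGEALLOC=y
    CONFIG_DEBUG_MUTEXES=y
    CONFIG_DEBUG_SPINLOCK=y
    CONFIG_DEBUG_ATOMIC_SLEEP=y
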
@@ -244,7 +244,7 @@ testing purposes.  In particular, you should turn on:
 - DEBUG_SLAB can find a variety of memory allocation and use errors; it
   should be used on most development kernels.
 
-- DEBUG_SPINLOCK, DEBUG_SPINLOCK_SLEEP, and DEBUG_MUTEXES will find a
+- DEBUG_SPINLOCK, DEBUG_ATOMIC_SLEEP, and DEBUG_MUTEXES will find a
   number of common locking errors.
 
 There are quite a few other debugging options, some of which will be

@@ -68,7 +68,7 @@ Checklist for Linux kernel patch submitters
 
 12: CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT, CONFIG_DEBUG_SLAB,
     CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES, CONFIG_DEBUG_SPINLOCK,
-    CONFIG_DEBUG_SPINLOCK_SLEEP: enable all of these at the same time
+    CONFIG_DEBUG_ATOMIC_SLEEP: enable all of these at the same time
     and verify that the patch works.
 
 13: Test both with and without CONFIG_SMP and CONFIG_PREEMPT enabled

@@ -67,7 +67,7 @@ Linux
 
 12: Has been tested with CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
     CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
-    CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_SPINLOCK_SLEEP, all enabled
+    CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_ATOMIC_SLEEP, all enabled
     at the same time.
 
 13: Has been built and runtime tested both with and without CONFIG_SMP and CONFIG_PREEMPT.

@@ -88,7 +88,7 @@ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
+#elif defined CONFIG_PREEMPT_COUNT
        return preempt_count();
 #else
        return 1;

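The new #elif works because taking a bit spinlock always disables preemption first, so on UP kernels with CONFIG_PREEMPT_COUNT a held bit lock shows up as a nonzero preempt_count(). An abridged sketch of bit_spin_lock() from the same header (the SMP spin loop is trimmed; the real code re-enables preemption while it spins):

    static inline void bit_spin_lock(int bitnum, unsigned long *addr)
    {
            preempt_disable();
    #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
            while (unlikely(test_and_set_bit_lock(bitnum, addr)))
                    cpu_relax();
    #endif
            __acquire(bitlock);
    }
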
@@ -93,7 +93,7 @@
  */
 #define in_nmi()       (preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT_COUNT)
 # define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_CHECK_OFFSET 0

@@ -115,7 +115,7 @@
 #define in_atomic_preempt_off() \
                ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 # define preemptible() (preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else

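A quick illustration of the invariants these macros encode, assuming CONFIG_PREEMPT_COUNT=y (hypothetical self-test, not part of the patch):

    #include <linux/preempt.h>
    #include <linux/hardirq.h>
    #include <linux/bug.h>

    static void preempt_count_demo(void)
    {
            WARN_ON(!preemptible());        /* process context, count == 0 */
            preempt_disable();              /* count: 0 -> 1 */
            WARN_ON(preemptible());         /* nonzero count: not preemptible */
            WARN_ON(!in_atomic());          /* in_atomic() keys off the count */
            preempt_enable();               /* count: 1 -> 0 */
    }
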
@@ -121,7 +121,7 @@ extern int _cond_resched(void);
 # define might_resched() do { } while (0)
 #endif
 
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
   void __might_sleep(const char *file, int line, int preempt_offset);
 /**
  * might_sleep - annotation for functions that can sleep

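might_sleep() is the annotation this option arms; a typical use in a blocking primitive (hypothetical device and function names):

    #include <linux/kernel.h>
    #include <linux/wait.h>

    struct my_dev {
            wait_queue_head_t waitq;
            int ready;
    };

    static int my_dev_wait_ready(struct my_dev *dev)
    {
            might_sleep();  /* complain at runtime if the caller is atomic */
            return wait_event_interruptible(dev->waitq, dev->ready);
    }
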
@@ -134,7 +134,7 @@ static inline int page_cache_get_speculative(struct page *page)
        VM_BUG_ON(in_interrupt());
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
 # endif
        /*

@@ -172,7 +172,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
        VM_BUG_ON(in_interrupt());
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
 # endif
        VM_BUG_ON(page_count(page) == 0);

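The in_atomic() assertion holds because callers do the speculative lookup under rcu_read_lock(), which on !SMP TREE_RCU kernels boils down to preempt_disable() and is therefore only observable in preempt_count() when CONFIG_PREEMPT_COUNT is set. A simplified caller pattern, modeled on find_get_page() of this era (retry handling omitted):

    rcu_read_lock();
    page = radix_tree_lookup(&mapping->page_tree, offset);
    if (page && !page_cache_get_speculative(page))
            page = NULL;    /* raced with removal; the real code retries */
    rcu_read_unlock();
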
@@ -27,6 +27,21 @@
 
 asmlinkage void preempt_schedule(void);
 
+#define preempt_check_resched() \
+do { \
+       if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+               preempt_schedule(); \
+} while (0)
+
+#else /* !CONFIG_PREEMPT */
+
+#define preempt_check_resched()        do { } while (0)
+
+#endif /* CONFIG_PREEMPT */
+
+
+#ifdef CONFIG_PREEMPT_COUNT
+
 #define preempt_disable() \
 do { \
        inc_preempt_count(); \

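The pair being reshuffled here implements the basic per-CPU critical section; a minimal usage sketch (hypothetical counter):

    #include <linux/preempt.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_hits);

    static void my_count_hit(void)
    {
            preempt_disable();              /* pin the task to this CPU */
            __this_cpu_inc(my_hits);        /* safe: no migration possible */
            preempt_enable();               /* ends with preempt_check_resched() */
    }

Note that preempt_enable() still finishes with preempt_check_resched() on preemptible kernels, which is why that macro now lives under CONFIG_PREEMPT while the counting machinery moves under CONFIG_PREEMPT_COUNT.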
@@ -39,12 +54,6 @@ do { \
        dec_preempt_count(); \
 } while (0)
 
-#define preempt_check_resched() \
-do { \
-       if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-               preempt_schedule(); \
-} while (0)
-
 #define preempt_enable() \
 do { \
        preempt_enable_no_resched(); \

@@ -80,18 +89,17 @@ do { \
        preempt_check_resched(); \
 } while (0)
 
-#else
+#else /* !CONFIG_PREEMPT_COUNT */
 
 #define preempt_disable()              do { } while (0)
 #define preempt_enable_no_resched()    do { } while (0)
 #define preempt_enable()               do { } while (0)
-#define preempt_check_resched()                do { } while (0)
 
 #define preempt_disable_notrace()              do { } while (0)
 #define preempt_enable_no_resched_notrace()    do { } while (0)
 #define preempt_enable_notrace()               do { } while (0)
 
-#endif
+#endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 

@@ -239,7 +239,7 @@ extern int rcu_read_lock_bh_held(void);
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
        int lockdep_opinion = 0;

@@ -250,12 +250,12 @@ static inline int rcu_read_lock_sched_held(void)
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
        return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
        return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 

@@ -276,17 +276,17 @@ static inline int rcu_read_lock_bh_held(void)
        return 1;
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
        return preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
        return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 

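rcu_read_lock_sched_held() backs the lockdep check inside rcu_dereference_sched(); the reader section it validates looks like this (hypothetical RCU-protected pointer gp):

    #include <linux/rcupdate.h>

    struct foo { int a; };
    static struct foo __rcu *gp;

    static int read_a(void)
    {
            struct foo *p;
            int a;

            rcu_read_lock_sched();          /* disables preemption */
            p = rcu_dereference_sched(gp);  /* lockdep consults ..._sched_held() */
            a = p ? p->a : -1;
            rcu_read_unlock_sched();
            return a;
    }
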
@@ -2501,7 +2501,7 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_LOCK_OFFSET    PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET    0

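PREEMPT_LOCK_OFFSET tells __cond_resched_lock() how much of preempt_count() is accounted for by the spinlock it may drop; a typical caller (hypothetical loop):

    #include <linux/spinlock.h>
    #include <linux/sched.h>

    static DEFINE_SPINLOCK(big_lock);

    static void scan_many_items(void)
    {
            int i;

            spin_lock(&big_lock);
            for (i = 0; i < 100000; i++) {
                    /* ... work on item i under big_lock ... */
                    cond_resched_lock(&big_lock);   /* may drop, schedule, retake */
            }
            spin_unlock(&big_lock);
    }
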
@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
 
 config PREEMPT
        bool "Preemptible Kernel (Low-Latency Desktop)"
+       select PREEMPT_COUNT
        help
          This option reduces the latency of the kernel by making
          all kernel code (that is not executing in a critical section)

@@ -52,3 +53,5 @@ config PREEMPT
 
 endchoice
 
+config PREEMPT_COUNT
+       bool

@@ -2854,7 +2854,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
        p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
 #endif

@@ -8022,7 +8022,7 @@ void __init sched_init(void)
        scheduler_running = 1;
 }
 
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
        int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();

@@ -8032,7 +8032,6 @@ static inline int preempt_count_equals(int preempt_offset)
 
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
-#ifdef in_atomic
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||

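The #ifdef in_atomic guard goes away because in_atomic() is now defined unconditionally in <linux/hardirq.h>. The ratelimiting that follows it is unchanged; abridged from the function body, it caps the reports at roughly one per second:

    static unsigned long prev_jiffy;        /* ratelimiting */

    if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
            return;
    prev_jiffy = jiffies;
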
@@ -8054,7 +8053,6 @@ void __might_sleep(const char *file, int line, int preempt_offset)
        if (irqs_disabled())
                print_irqtrace_events(current);
        dump_stack();
-#endif
 }
 EXPORT_SYMBOL(__might_sleep);
 #endif

@@ -648,12 +648,15 @@ config TRACE_IRQFLAGS
          Enables hooks to interrupt enabling and disabling for
          either tracing or lock debugging.
 
-config DEBUG_SPINLOCK_SLEEP
-       bool "Spinlock debugging: sleep-inside-spinlock checking"
+config DEBUG_ATOMIC_SLEEP
+       bool "Sleep inside atomic section checking"
+       select PREEMPT_COUNT
        depends on DEBUG_KERNEL
        help
          If you say Y here, various routines which may sleep will become very
-         noisy if they are called with a spinlock held.
+         noisy if they are called inside atomic sections: when a spinlock is
+         held, inside an rcu read side critical section, inside preempt disabled
+         sections, inside an interrupt, etc...
 
 config DEBUG_LOCKING_API_SELFTESTS
        bool "Locking API boot-time self-tests"

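For reference, when the renamed option fires, __might_sleep() prints a report of roughly this shape (path, line, pid, and task name are illustrative):

    BUG: sleeping function called from invalid context at drivers/foo/bar.c:42
    in_atomic(): 1, irqs_disabled(): 0, pid: 123, name: modprobe
    Call Trace:
     ...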