Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: Fix lockdep_no_validate against IRQ states
  mutex: Make mutex_destroy() an inline function
  plist: Remove the need to supply locks to plist heads
  lockup detector: Fix reference to the non-existent CONFIG_DETECT_SOFTLOCKUP option
commit 75b56ec294
@@ -92,7 +92,7 @@ do {							\
 							\
 		__mutex_init((mutex), #mutex, &__key);	\
 } while (0)
-# define mutex_destroy(mutex)			do { } while (0)
+static inline void mutex_destroy(struct mutex *lock) {}
 #endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
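The mutex_destroy() hunk above swaps an empty do-while macro for an empty static inline. The practical difference is that the argument is now type-checked even when CONFIG_DEBUG_MUTEXES is disabled; a minimal sketch of caller-side usage (the struct and function names are hypothetical):

#include <linux/mutex.h>

struct foo {
	struct mutex lock;
};

static void foo_teardown(struct foo *f)
{
	/*
	 * With the old "do { } while (0)" macro the argument was simply
	 * discarded, so a wrongly typed pointer here went unnoticed unless
	 * CONFIG_DEBUG_MUTEXES was enabled.  As a static inline taking a
	 * struct mutex *, the call is type-checked in every configuration
	 * and still compiles to nothing when debugging is off.
	 */
	mutex_destroy(&f->lock);
}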
@@ -77,14 +77,9 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/spinlock_types.h>
 
 struct plist_head {
 	struct list_head node_list;
-#ifdef CONFIG_DEBUG_PI_LIST
-	raw_spinlock_t *rawlock;
-	spinlock_t *spinlock;
-#endif
 };
 
 struct plist_node {
@@ -93,37 +88,13 @@ struct plist_node {
 	struct list_head	node_list;
 };
 
-#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock)		.spinlock = _lock
-# define PLIST_HEAD_LOCK_INIT_RAW(_lock)	.rawlock = _lock
-#else
-# define PLIST_HEAD_LOCK_INIT(_lock)
-# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
-#endif
-
-#define _PLIST_HEAD_INIT(head)				\
-	.node_list = LIST_HEAD_INIT((head).node_list)
-
 /**
  * PLIST_HEAD_INIT - static struct plist_head initializer
  * @head:	struct plist_head variable name
- * @_lock:	lock to initialize for this list
  */
-#define PLIST_HEAD_INIT(head, _lock)			\
+#define PLIST_HEAD_INIT(head)				\
 {							\
-	_PLIST_HEAD_INIT(head),				\
-	PLIST_HEAD_LOCK_INIT(&(_lock))			\
-}
-
-/**
- * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
- * @head:	struct plist_head variable name
- * @_lock:	lock to initialize for this list
- */
-#define PLIST_HEAD_INIT_RAW(head, _lock)		\
-{							\
-	_PLIST_HEAD_INIT(head),				\
-	PLIST_HEAD_LOCK_INIT_RAW(&(_lock))		\
+	.node_list = LIST_HEAD_INIT((head).node_list)	\
 }
 
 /**
@@ -141,31 +112,11 @@ struct plist_node {
 /**
  * plist_head_init - dynamic struct plist_head initializer
  * @head:	&struct plist_head pointer
- * @lock:	spinlock protecting the list (debugging)
  */
 static inline void
-plist_head_init(struct plist_head *head, spinlock_t *lock)
+plist_head_init(struct plist_head *head)
 {
 	INIT_LIST_HEAD(&head->node_list);
-#ifdef CONFIG_DEBUG_PI_LIST
-	head->spinlock = lock;
-	head->rawlock = NULL;
-#endif
-}
-
-/**
- * plist_head_init_raw - dynamic struct plist_head initializer
- * @head:	&struct plist_head pointer
- * @lock:	raw_spinlock protecting the list (debugging)
- */
-static inline void
-plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
-{
-	INIT_LIST_HEAD(&head->node_list);
-#ifdef CONFIG_DEBUG_PI_LIST
-	head->rawlock = lock;
-	head->spinlock = NULL;
-#endif
 }
 
 /**
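After the plist.h changes above, a plist head no longer records the lock that protects it: the PLIST_HEAD_INIT_RAW/plist_head_init_raw variants are gone and the remaining initializers take only the head. Callers still serialize plist operations with their own lock; it just is not registered with the head any more. A minimal sketch of new-style usage (the my_* names are hypothetical):

#include <linux/plist.h>
#include <linux/spinlock.h>

/* Static initialization: no lock argument any more. */
static DEFINE_SPINLOCK(my_lock);	/* still protects the list operations */
static struct plist_head my_head = PLIST_HEAD_INIT(my_head);

/* Dynamic initialization: likewise just the head. */
static void my_waiters_init(struct plist_head *head)
{
	plist_head_init(head);
}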
@@ -66,7 +66,7 @@ struct hrtimer_sleeper;
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
 	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-	, .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
+	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
 
@@ -100,7 +100,7 @@ extern void rt_mutex_unlock(struct rt_mutex *lock);
 
 #ifdef CONFIG_RT_MUTEXES
 # define INIT_RT_MUTEXES(tsk)						\
-	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock),	\
+	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters),		\
 	INIT_RT_MUTEX_DEBUG(tsk)
 #else
 # define INIT_RT_MUTEXES(tsk)
@@ -1012,7 +1012,7 @@ static void rt_mutex_init_task(struct task_struct *p)
 {
 	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
+	plist_head_init(&p->pi_waiters);
 	p->pi_blocked_on = NULL;
 #endif
 }
@@ -2697,7 +2697,7 @@ static int __init futex_init(void)
 		futex_cmpxchg_enabled = 1;
 
 	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
-		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+		plist_head_init(&futex_queues[i].chain);
 		spin_lock_init(&futex_queues[i].lock);
 	}
 
@@ -2468,6 +2468,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 
 		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
 
+		if (hlock_class(hlock)->key == &__lockdep_no_validate__)
+			continue;
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
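The lockdep hunk above makes mark_held_locks() skip any held lock whose class key is __lockdep_no_validate__, so propagating IRQ-state usage bits no longer trips over locks that were deliberately opted out of validation. Such classes are typically set up with lockdep_set_novalidate_class(); a minimal, hypothetical sketch of that usage:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_dev {
	struct mutex lock;
};

static void my_dev_init(struct my_dev *d)
{
	mutex_init(&d->lock);
	/*
	 * Opt this lock out of lockdep validation.  With the fix above,
	 * mark_held_locks() now also skips it when marking IRQ usage
	 * states instead of warning about the unvalidated class.
	 */
	lockdep_set_novalidate_class(&d->lock);
}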
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pm_qos_lock);
 static struct pm_qos_object null_pm_qos;
 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
 static struct pm_qos_object cpu_dma_pm_qos = {
-	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
 	.notifiers = &cpu_dma_lat_notifier,
 	.name = "cpu_dma_latency",
 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
@@ -84,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
 
 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
 static struct pm_qos_object network_lat_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
 	.notifiers = &network_lat_notifier,
 	.name = "network_latency",
 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
@@ -95,7 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
 
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_object network_throughput_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
 	.notifiers = &network_throughput_notifier,
 	.name = "network_throughput",
 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
@@ -890,7 +890,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
-	plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
+	plist_head_init(&lock->wait_list);
 
 	debug_rt_mutex_init(lock, name);
 }
@@ -7937,7 +7937,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
-	plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
+	plist_head_init(&rt_rq->pushable_tasks);
 #endif
 
 	rt_rq->rt_time = 0;
@@ -8142,7 +8142,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
+	plist_head_init(&init_task.pi_waiters);
 #endif
 
 	/*
@@ -227,7 +227,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 config DETECT_HUNG_TASK
 	bool "Detect Hung Tasks"
 	depends on DEBUG_KERNEL
-	default DETECT_SOFTLOCKUP
+	default LOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to detect "hung tasks",
 	  which are bugs that cause the task to be stuck in
@@ -866,7 +866,7 @@ config BOOT_PRINTK_DELAY
 	  system, and then set "lpj=M" before setting "boot_delay=N".
 	  NOTE:  Using this option may adversely affect SMP systems.
 	  I.e., processors other than the first one may not boot up.
-	  BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect
+	  BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
 	  what it believes to be lockup conditions.
 
 config RCU_TORTURE_TEST
@@ -56,11 +56,6 @@ static void plist_check_list(struct list_head *top)
 
 static void plist_check_head(struct plist_head *head)
 {
-	WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
-	if (head->rawlock)
-		WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
-	if (head->spinlock)
-		WARN_ON_SMP(!spin_is_locked(head->spinlock));
 	if (!plist_head_empty(head))
 		plist_check_list(&plist_first(head)->prio_list);
 	plist_check_list(&head->node_list);
@@ -180,7 +175,7 @@ static int __init plist_test(void)
 	unsigned int r = local_clock();
 
 	printk(KERN_INFO "start plist test\n");
-	plist_head_init(&test_head, NULL);
+	plist_head_init(&test_head);
 	for (i = 0; i < ARRAY_SIZE(test_node); i++)
 		plist_node_init(test_node + i, 0);
 