diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0081f000e34b..c92ebd100d9b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -52,10 +52,15 @@ struct lglock {
 	static struct lglock name = { .lock = &name ## _lock }
 
 void lg_lock_init(struct lglock *lg, char *name);
+
 void lg_local_lock(struct lglock *lg);
 void lg_local_unlock(struct lglock *lg);
 void lg_local_lock_cpu(struct lglock *lg, int cpu);
 void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
 
diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
index 86ae2aebf004..951cfcd10b4a 100644
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -60,6 +60,28 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
+{
+	BUG_ON(cpu1 == cpu2);
+
+	/* lock in cpu order, just like lg_global_lock */
+	if (cpu2 < cpu1)
+		swap(cpu1, cpu2);
+
+	preempt_disable();
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+	arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
+	arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
+}
+
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
+{
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
+	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
+	preempt_enable();
+}
+
 void lg_global_lock(struct lglock *lg)
 {
 	int i;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 695f0c6cd169..fd643d8c4b42 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -211,25 +211,6 @@ static int multi_cpu_stop(void *data)
 	return err;
 }
 
-struct irq_cpu_stop_queue_work_info {
-	int cpu1;
-	int cpu2;
-	struct cpu_stop_work *work1;
-	struct cpu_stop_work *work2;
-};
-
-/*
- * This function is always run with irqs and preemption disabled.
- * This guarantees that both work1 and work2 get queued, before
- * our local migrate thread gets the chance to preempt us.
- */
-static void irq_cpu_stop_queue_work(void *arg)
-{
-	struct irq_cpu_stop_queue_work_info *info = arg;
-	cpu_stop_queue_work(info->cpu1, info->work1);
-	cpu_stop_queue_work(info->cpu2, info->work2);
-}
-
 /**
  * stop_two_cpus - stops two cpus
  * @cpu1: the cpu to stop
@@ -245,7 +226,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 {
 	struct cpu_stop_done done;
 	struct cpu_stop_work work1, work2;
-	struct irq_cpu_stop_queue_work_info call_args;
 	struct multi_stop_data msdata;
 
 	preempt_disable();
@@ -262,13 +242,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 		.done = &done
 	};
 
-	call_args = (struct irq_cpu_stop_queue_work_info){
-		.cpu1 = cpu1,
-		.cpu2 = cpu2,
-		.work1 = &work1,
-		.work2 = &work2,
-	};
-
 	cpu_stop_init_done(&done, 2);
 	set_state(&msdata, MULTI_STOP_PREPARE);
 
@@ -285,16 +258,11 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 		return -ENOENT;
 	}
 
-	lg_local_lock(&stop_cpus_lock);
-	/*
-	 * Queuing needs to be done by the lowest numbered CPU, to ensure
-	 * that works are always queued in the same order on every CPU.
-	 * This prevents deadlocks.
-	 */
-	smp_call_function_single(min(cpu1, cpu2),
-				 &irq_cpu_stop_queue_work,
-				 &call_args, 1);
-	lg_local_unlock(&stop_cpus_lock);
+	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+	cpu_stop_queue_work(cpu1, &work1);
+	cpu_stop_queue_work(cpu2, &work2);
+	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
 	preempt_enable();
 
 	wait_for_completion(&done.completion);
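
Note for readers outside the kernel tree: the deadlock-avoidance idea behind lg_double_lock() can be seen in a small user-space sketch (my own analogue, not kernel code). Two per-index mutexes stand in for the per-CPU arch_spinlock_t, and every caller takes the pair in ascending index order, so two threads locking overlapping pairs cannot ABBA-deadlock. All names here (cpu_lock, double_lock, double_unlock, stopper) are illustrative only.

/*
 * User-space analogue of the lg_double_lock() ordering rule.
 * Build with: gcc -pthread -o double_lock double_lock.c
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t cpu_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void double_lock(int cpu1, int cpu2)
{
	/* lock in index order, mirroring lg_double_lock() */
	if (cpu2 < cpu1) {
		int tmp = cpu1;
		cpu1 = cpu2;
		cpu2 = tmp;
	}
	pthread_mutex_lock(&cpu_lock[cpu1]);
	pthread_mutex_lock(&cpu_lock[cpu2]);
}

static void double_unlock(int cpu1, int cpu2)
{
	/* unlock order does not matter for deadlock avoidance */
	pthread_mutex_unlock(&cpu_lock[cpu1]);
	pthread_mutex_unlock(&cpu_lock[cpu2]);
}

static void *stopper(void *arg)
{
	int *cpus = arg;

	for (int i = 0; i < 100000; i++) {
		double_lock(cpus[0], cpus[1]);
		/* both works would be queued here, as in stop_two_cpus() */
		double_unlock(cpus[0], cpus[1]);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int pair_a[2] = { 0, 1 };
	int pair_b[2] = { 1, 0 };	/* same CPUs, opposite order */

	pthread_create(&a, NULL, stopper, pair_a);
	pthread_create(&b, NULL, stopper, pair_b);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("no deadlock: both threads finished\n");
	return 0;
}

If the swap in double_lock() is removed, one thread locks 0 then 1 while the other locks 1 then 0, and the pair deadlocks almost immediately; the ascending-order rule is what the "lock in cpu order, just like lg_global_lock" comment in the patch relies on.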