[S390] cpu topology: fix locking
cpu_coregroup_map used to grab a mutex on s390 since it was only
called from process context.
Since c7c22e4d5c "block: add support for IO CPU affinity" this is
not true anymore: it now also gets called from softirq context.
To prevent possible deadlocks, change this in the architecture code
and use a spinlock instead of a mutex.
Cc: stable@kernel.org
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 74af283102
parent 85acc407bf
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -65,18 +65,21 @@ static int machine_has_topology_irq;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the core linked list */
+static DEFINE_SPINLOCK(topology_lock);
 
 cpumask_t cpu_core_map[NR_CPUS];
 
 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
 	struct core_info *core = &core_info;
+	unsigned long flags;
 	cpumask_t mask;
 
 	cpus_clear(mask);
 	if (!machine_has_topology)
 		return cpu_present_map;
-	mutex_lock(&smp_cpu_state_mutex);
+	spin_lock_irqsave(&topology_lock, flags);
 	while (core) {
 		if (cpu_isset(cpu, core->mask)) {
 			mask = core->mask;
@@ -84,7 +87,7 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
 		}
 		core = core->next;
 	}
-	mutex_unlock(&smp_cpu_state_mutex);
+	spin_unlock_irqrestore(&topology_lock, flags);
 	if (cpus_empty(mask))
 		mask = cpumask_of_cpu(cpu);
 	return mask;
@@ -133,7 +136,7 @@ static void tl_to_cores(struct tl_info *info)
 	union tl_entry *tle, *end;
 	struct core_info *core = &core_info;
 
-	mutex_lock(&smp_cpu_state_mutex);
+	spin_lock_irq(&topology_lock);
 	clear_cores();
 	tle = info->tle;
 	end = (union tl_entry *)((unsigned long)info + info->length);
@@ -157,7 +160,7 @@ static void tl_to_cores(struct tl_info *info)
 		}
 		tle = next_tle(tle);
 	}
-	mutex_unlock(&smp_cpu_state_mutex);
+	spin_unlock_irq(&topology_lock);
 }
 
 static void topology_update_polarization_simple(void)
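
A minimal sketch of the locking pattern the patch switches to is shown
below. It is not taken from the kernel sources; the names demo_node,
demo_list, demo_lock, demo_read and demo_add are made up for
illustration. It only mirrors the idea visible in the diff: a reader
that can run in softirq context must use the irqsave spinlock variants
(a mutex may sleep and therefore must not be taken there), while the
process-context update path can get by with spin_lock_irq().

/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * pattern adopted above: a spinlock protecting a list that is read
 * from softirq context and updated from process context.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_node {			/* hypothetical list element */
	struct list_head list;
	int data;
};

static LIST_HEAD(demo_list);
/* demo_lock protects demo_list, analogous to topology_lock above */
static DEFINE_SPINLOCK(demo_lock);

/* Reader that may run in softirq context (like cpu_coregroup_map). */
static int demo_read(int key)
{
	struct demo_node *node;
	unsigned long flags;
	int found = 0;

	/* irqsave/irqrestore: safe regardless of the calling context */
	spin_lock_irqsave(&demo_lock, flags);
	list_for_each_entry(node, &demo_list, list) {
		if (node->data == key) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&demo_lock, flags);
	return found;
}

/* Writer called from process context only (like tl_to_cores). */
static void demo_add(struct demo_node *new)
{
	spin_lock_irq(&demo_lock);
	list_add(&new->list, &demo_list);
	spin_unlock_irq(&demo_lock);
}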