dm: disable interrupt when taking map_lock
This patch disables interrupts when taking map_lock to avoid lockdep warnings in request-based dm.

Request-based dm takes map_lock after taking queue_lock with interrupts disabled:

    spin_lock_irqsave(queue_lock)
    q->request_fn() == dm_request_fn()
        => dm_get_table()
            => read_lock(map_lock)

while queue_lock could be (but currently isn't) taken in interrupt context.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Acked-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Acked-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 5d67aa2366
commit 523d9297d4
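For reference, below is a minimal sketch of the locking pattern the commit message describes. It is not taken from the patch: the lock and function names (example_queue_lock, example_map_lock, example_get_table_*, example_request_path) are illustrative stand-ins for q->queue_lock, md->map_lock and the dm_request_fn()/dm_get_table() call chain.

/*
 * Illustrative sketch only -- not part of the patch.  The names below
 * are made up; they stand in for q->queue_lock, md->map_lock and the
 * dm_request_fn() => dm_get_table() path from the commit message.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_queue_lock);	/* IRQ-safe: always taken with irqs off */
static DEFINE_RWLOCK(example_map_lock);		/* the lock this patch changes */

/* Old pattern: map_lock taken without disabling interrupts. */
static void example_get_table_old(void)
{
	read_lock(&example_map_lock);
	/* ... look up and take a reference on the table ... */
	read_unlock(&example_map_lock);
}

/*
 * The request path nests the IRQ-unsafe map_lock inside the IRQ-safe
 * queue_lock; this IRQ-safe -> IRQ-unsafe ordering is what lockdep
 * warns about.
 */
static void example_request_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_queue_lock, flags);
	example_get_table_old();		/* nested acquisition */
	spin_unlock_irqrestore(&example_queue_lock, flags);
}

/* New pattern: every map_lock user disables interrupts itself. */
static void example_get_table_new(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_map_lock, flags);
	/* ... look up and take a reference on the table ... */
	read_unlock_irqrestore(&example_map_lock, flags);
}

With every acquisition of map_lock disabling interrupts, the lock is IRQ-safe in all contexts, so the ordering that lockdep complained about no longer exists. The patch below applies exactly this change to dm_get_table(), __bind() and __unbind().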
drivers/md/dm.c
@@ -512,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
 struct dm_table *dm_get_table(struct mapped_device *md)
 {
 	struct dm_table *t;
+	unsigned long flags;
 
-	read_lock(&md->map_lock);
+	read_lock_irqsave(&md->map_lock, flags);
 	t = md->map;
 	if (t)
 		dm_table_get(t);
-	read_unlock(&md->map_lock);
+	read_unlock_irqrestore(&md->map_lock, flags);
 
 	return t;
 }
@@ -1910,6 +1911,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 {
 	struct request_queue *q = md->queue;
 	sector_t size;
+	unsigned long flags;
 
 	size = dm_table_get_size(t);
 
@@ -1940,10 +1942,10 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 
 	__bind_mempools(md, t);
 
-	write_lock(&md->map_lock);
+	write_lock_irqsave(&md->map_lock, flags);
 	md->map = t;
 	dm_table_set_restrictions(t, q, limits);
-	write_unlock(&md->map_lock);
+	write_unlock_irqrestore(&md->map_lock, flags);
 
 	return 0;
 }
@@ -1951,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 static void __unbind(struct mapped_device *md)
 {
 	struct dm_table *map = md->map;
+	unsigned long flags;
 
 	if (!map)
 		return;
 
 	dm_table_event_callback(map, NULL, NULL);
-	write_lock(&md->map_lock);
+	write_lock_irqsave(&md->map_lock, flags);
 	md->map = NULL;
-	write_unlock(&md->map_lock);
+	write_unlock_irqrestore(&md->map_lock, flags);
 	dm_table_destroy(map);
 }
 