dm cache: improve efficiency of quiescing flag management

Make the quiescing flag an atomic_t and stop protecting it with a spin
lock.
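
For context, this is the common conversion of a lock-protected boolean into a
lock-free atomic flag. A minimal userspace sketch of the same before/after
shape (C11 atomics and a pthread mutex standing in for the kernel spinlock;
the types and function names here are illustrative, not the dm-cache code):

/*
 * Illustrative sketch only: a flag read under a lock versus the same
 * flag kept as an atomic.  Hypothetical types, not dm-cache code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct flag_locked {
        pthread_mutex_t lock;
        bool quiescing;
};

/* Before: every reader takes the lock just to read one bool. */
static bool locked_is_quiescing(struct flag_locked *f)
{
        bool r;

        pthread_mutex_lock(&f->lock);
        r = f->quiescing;
        pthread_mutex_unlock(&f->lock);

        return r;
}

struct flag_atomic {
        atomic_int quiescing;
};

/* After: the flag is atomic, so reading it needs no lock at all. */
static bool atomic_is_quiescing(struct flag_atomic *f)
{
        return atomic_load(&f->quiescing);
}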

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Joe Thornber 2013-10-30 17:29:30 +00:00 committed by Mike Snitzer
parent 66cb1910df
commit 238f8363b6
1 changed file with 5 additions and 22 deletions

@@ -149,6 +149,7 @@ struct cache {
         atomic_t nr_migrations;
 
         wait_queue_head_t quiescing_wait;
+        atomic_t quiescing;
         atomic_t quiescing_ack;
 
         /*
@@ -189,7 +190,6 @@ struct cache {
 
         bool need_tick_bio:1;
         bool sized:1;
-        bool quiescing:1;
         bool commit_requested:1;
         bool loaded_mappings:1;
         bool loaded_discards:1;
@ -1353,14 +1353,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
*--------------------------------------------------------------*/ *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache) static bool is_quiescing(struct cache *cache)
{ {
int r; return atomic_read(&cache->quiescing);
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
r = cache->quiescing;
spin_unlock_irqrestore(&cache->lock, flags);
return r;
} }
static void ack_quiescing(struct cache *cache) static void ack_quiescing(struct cache *cache)
@@ -1378,23 +1371,13 @@ static void wait_for_quiescing_ack(struct cache *cache)
 
 static void start_quiescing(struct cache *cache)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&cache->lock, flags);
-        cache->quiescing = true;
-        spin_unlock_irqrestore(&cache->lock, flags);
-
+        atomic_inc(&cache->quiescing);
         wait_for_quiescing_ack(cache);
 }
 
 static void stop_quiescing(struct cache *cache)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&cache->lock, flags);
-        cache->quiescing = false;
-        spin_unlock_irqrestore(&cache->lock, flags);
-
+        atomic_set(&cache->quiescing, 0);
         atomic_set(&cache->quiescing_ack, 0);
 }
 
@@ -2030,6 +2013,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
         init_waitqueue_head(&cache->migration_wait);
 
         init_waitqueue_head(&cache->quiescing_wait);
+        atomic_set(&cache->quiescing, 0);
         atomic_set(&cache->quiescing_ack, 0);
 
         r = -ENOMEM;
@@ -2091,7 +2075,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
         cache->need_tick_bio = true;
         cache->sized = false;
-        cache->quiescing = false;
         cache->commit_requested = false;
         cache->loaded_mappings = false;
         cache->loaded_discards = false;
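
The quiescing_ack counter and quiescing_wait wait queue visible in the context
lines above form the other half of the handshake: the worker acknowledges the
quiesce request and the caller sleeps until that acknowledgement arrives. A
rough userspace sketch of such a quiesce/ack handshake, with a condition
variable standing in for the kernel wait queue (an assumption-laden
simplification, not the driver's actual implementation):

#include <pthread.h>
#include <stdatomic.h>

struct quiesce {
        atomic_int quiescing;       /* raised by the controller */
        atomic_int quiescing_ack;   /* raised by the worker once drained */
        pthread_mutex_t lock;
        pthread_cond_t wait;        /* stands in for quiescing_wait */
};

/* Worker side: once in-flight work is done, acknowledge the request. */
static void ack_quiescing_sketch(struct quiesce *q)
{
        if (atomic_load(&q->quiescing)) {
                atomic_store(&q->quiescing_ack, 1);
                pthread_mutex_lock(&q->lock);
                pthread_cond_broadcast(&q->wait);
                pthread_mutex_unlock(&q->lock);
        }
}

/* Controller side: raise the flag, then block until the worker acks. */
static void start_quiescing_sketch(struct quiesce *q)
{
        atomic_store(&q->quiescing, 1);
        pthread_mutex_lock(&q->lock);
        while (!atomic_load(&q->quiescing_ack))
                pthread_cond_wait(&q->wait, &q->lock);
        pthread_mutex_unlock(&q->lock);
}

/* Controller side: clear both flags when resuming normal operation. */
static void stop_quiescing_sketch(struct quiesce *q)
{
        atomic_store(&q->quiescing, 0);
        atomic_store(&q->quiescing_ack, 0);
}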