sunrpc/cache: change deferred-request hash table to use hlist.

Being a hash table, hlist is the best option.

There is currently some ugliness where we treat "->next == NULL" as
a special case to avoid having to initialise the whole array.
This change nicely gets rid of that case.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
This commit is contained in:
NeilBrown 2010-08-12 17:04:08 +10:00 committed by J. Bruce Fields
parent 2ed5282cd9
commit 1117449276
2 changed files with 11 additions and 19 deletions

View File

@ -133,7 +133,7 @@ struct cache_req {
* delayed awaiting cache-fill * delayed awaiting cache-fill
*/ */
struct cache_deferred_req { struct cache_deferred_req {
struct list_head hash; /* on hash chain */ struct hlist_node hash; /* on hash chain */
struct list_head recent; /* on fifo */ struct list_head recent; /* on fifo */
struct cache_head *item; /* cache item we wait on */ struct cache_head *item; /* cache item we wait on */
void *owner; /* we might need to discard all defered requests void *owner; /* we might need to discard all defered requests

View File

@ -506,13 +506,13 @@ EXPORT_SYMBOL_GPL(cache_purge);
static DEFINE_SPINLOCK(cache_defer_lock); static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list); static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE]; static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt; static int cache_defer_cnt;
static void __unhash_deferred_req(struct cache_deferred_req *dreq) static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{ {
list_del_init(&dreq->recent); list_del_init(&dreq->recent);
list_del_init(&dreq->hash); hlist_del_init(&dreq->hash);
cache_defer_cnt--; cache_defer_cnt--;
} }
@ -521,9 +521,7 @@ static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_he
int hash = DFR_HASH(item); int hash = DFR_HASH(item);
list_add(&dreq->recent, &cache_defer_list); list_add(&dreq->recent, &cache_defer_list);
if (cache_defer_hash[hash].next == NULL) hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
INIT_LIST_HEAD(&cache_defer_hash[hash]);
list_add(&dreq->hash, &cache_defer_hash[hash]);
} }
static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item) static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
@ -588,7 +586,7 @@ static int cache_wait_req(struct cache_req *req, struct cache_head *item)
* to clean up * to clean up
*/ */
spin_lock(&cache_defer_lock); spin_lock(&cache_defer_lock);
if (!list_empty(&sleeper.handle.hash)) { if (!hlist_unhashed(&sleeper.handle.hash)) {
__unhash_deferred_req(&sleeper.handle); __unhash_deferred_req(&sleeper.handle);
spin_unlock(&cache_defer_lock); spin_unlock(&cache_defer_lock);
} else { } else {
@ -642,24 +640,18 @@ static void cache_revisit_request(struct cache_head *item)
{ {
struct cache_deferred_req *dreq; struct cache_deferred_req *dreq;
struct list_head pending; struct list_head pending;
struct hlist_node *lp, *tmp;
struct list_head *lp;
int hash = DFR_HASH(item); int hash = DFR_HASH(item);
INIT_LIST_HEAD(&pending); INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock); spin_lock(&cache_defer_lock);
lp = cache_defer_hash[hash].next; hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
if (lp) { if (dreq->item == item) {
while (lp != &cache_defer_hash[hash]) { __unhash_deferred_req(dreq);
dreq = list_entry(lp, struct cache_deferred_req, hash); list_add(&dreq->recent, &pending);
lp = lp->next;
if (dreq->item == item) {
__unhash_deferred_req(dreq);
list_add(&dreq->recent, &pending);
}
} }
}
spin_unlock(&cache_defer_lock); spin_unlock(&cache_defer_lock);
while (!list_empty(&pending)) { while (!list_empty(&pending)) {