knfsd: reply cache cleanups

Make REQHASH() an inline function.  Rename hash_list to cache_hash.
Fix an obsolete comment.

Signed-off-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
commit fca4217c5b
parent dd4dc82d4c
Author:    Greg Banks, 2009-04-01 07:28:13 +11:00
Committer: J. Bruce Fields
2 changed files with 20 additions and 12 deletions
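As a quick sanity check that the conversion is behaviour-preserving, the
old macro and the new helper can be compared in a small standalone
userspace program. This is only a sketch, not part of the patch: the
kernel-only __force/__u32 annotations are replaced with uint32_t so it
builds outside the tree.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HASHSIZE 64

/* Old macro, as removed by this patch (kernel annotations dropped). */
#define REQHASH(xid) (((((uint32_t)(xid)) >> 24) ^ ((uint32_t)(xid))) & (HASHSIZE-1))

/* New helper, as added by this patch. */
static inline uint32_t request_hash(uint32_t xid)
{
	uint32_t h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0x12345678, 0xdeadbeef, 0xff000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(REQHASH(samples[i]) == request_hash(samples[i]));
	printf("REQHASH and request_hash agree on all samples\n");
	return 0;
}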

--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -29,15 +29,24 @@
  */
 #define CACHESIZE	1024
 #define HASHSIZE	64
-#define REQHASH(xid)	(((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
 
-static struct hlist_head *	hash_list;
+static struct hlist_head *	cache_hash;
 static struct list_head 	lru_head;
 static int			cache_disabled = 1;
 
+/*
+ * Calculate the hash index from an XID.
+ */
+static inline u32 request_hash(u32 xid)
+{
+	u32 h = xid;
+	h ^= (xid >> 24);
+	return h & (HASHSIZE-1);
+}
+
 static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 
 /*
  * locking for the reply cache:
  * A cache entry is "single use" if c_state == RC_INPROG
  * Otherwise, it when accessing _prev or _next, the lock must be held.
@@ -62,8 +71,8 @@ int nfsd_reply_cache_init(void)
 		i--;
 	}
 
-	hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
-	if (!hash_list)
+	cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+	if (!cache_hash)
 		goto out_nomem;
 
 	cache_disabled = 0;
@@ -88,8 +97,8 @@ void nfsd_reply_cache_shutdown(void)
 
 	cache_disabled = 1;
 
-	kfree (hash_list);
-	hash_list = NULL;
+	kfree (cache_hash);
+	cache_hash = NULL;
 }
 
 /*
@@ -108,7 +117,7 @@ static void
 hash_refile(struct svc_cacherep *rp)
 {
 	hlist_del_init(&rp->c_hash);
-	hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
+	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
 }
 
 /*
@@ -138,7 +147,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
 	spin_lock(&cache_lock);
 	rtn = RC_DOIT;
 
-	rh = &hash_list[REQHASH(xid)];
+	rh = &cache_hash[request_hash(xid)];
 	hlist_for_each_entry(rp, hn, rh, c_hash) {
 		if (rp->c_state != RC_UNUSED &&
 		    xid == rp->c_xid && proc == rp->c_proc &&
@@ -264,7 +273,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
 	len >>= 2;
 
-	/* Don't cache excessive amounts of data and XDR failures */
-	if (!statp || len > (256 >> 2)) {
+	/* Don't cache excessive amounts of data and XDR failures */
+	if (!statp || len > (256 >> 2)) {
 		rp->c_state = RC_UNUSED;
 		return;

--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -14,8 +14,7 @@
 #include <linux/uio.h>
 
 /*
- * Representation of a reply cache entry. The first two members *must*
- * be hash_next and hash_prev.
+ * Representation of a reply cache entry.
  */
 struct svc_cacherep {
	struct hlist_node	c_hash;