nfsd: eliminate one of the DRC cache searches

The most common case is to do a search of the cache, followed by an
insert. In the case where we have to allocate an entry off the slab,
then we end up having to redo the search, which is wasteful.

Better optimize the code for the common case by eliminating the initial
search of the cache and always preallocating an entry. In the case of a
cache hit, we'll end up just freeing that entry but that's preferable to
an extra search.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
This commit is contained in:
Jeff Layton 2013-03-27 10:15:37 -04:00 committed by J. Bruce Fields
parent 64a817cfbd
commit 0b9ea37f24
1 changed file with 19 additions and 22 deletions

View File

@@ -318,55 +318,53 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
__wsum csum; __wsum csum;
unsigned long age; unsigned long age;
int type = rqstp->rq_cachetype; int type = rqstp->rq_cachetype;
int rtn; int rtn = RC_DOIT;
rqstp->rq_cacherep = NULL; rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) { if (type == RC_NOCACHE) {
nfsdstats.rcnocache++; nfsdstats.rcnocache++;
return RC_DOIT; return rtn;
} }
csum = nfsd_cache_csum(rqstp); csum = nfsd_cache_csum(rqstp);
/*
* Since the common case is a cache miss followed by an insert,
* preallocate an entry. First, try to reuse the first entry on the LRU
* if it works, then go ahead and prune the LRU list.
*/
spin_lock(&cache_lock); spin_lock(&cache_lock);
rtn = RC_DOIT;
rp = nfsd_cache_search(rqstp, csum);
if (rp)
goto found_entry;
/* Try to use the first entry on the LRU */
if (!list_empty(&lru_head)) { if (!list_empty(&lru_head)) {
rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
if (nfsd_cache_entry_expired(rp) || if (nfsd_cache_entry_expired(rp) ||
num_drc_entries >= max_drc_entries) { num_drc_entries >= max_drc_entries) {
lru_put_end(rp); lru_put_end(rp);
prune_cache_entries(); prune_cache_entries();
goto setup_entry; goto search_cache;
} }
} }
/* Drop the lock and allocate a new entry */ /* No expired ones available, allocate a new one. */
spin_unlock(&cache_lock); spin_unlock(&cache_lock);
rp = nfsd_reply_cache_alloc(); rp = nfsd_reply_cache_alloc();
if (!rp) {
dprintk("nfsd: unable to allocate DRC entry!\n");
return RC_DOIT;
}
spin_lock(&cache_lock); spin_lock(&cache_lock);
++num_drc_entries; if (likely(rp))
++num_drc_entries;
/* search_cache:
* Must search again just in case someone inserted one
* after we dropped the lock above.
*/
found = nfsd_cache_search(rqstp, csum); found = nfsd_cache_search(rqstp, csum);
if (found) { if (found) {
nfsd_reply_cache_free_locked(rp); if (likely(rp))
nfsd_reply_cache_free_locked(rp);
rp = found; rp = found;
goto found_entry; goto found_entry;
} }
if (!rp) {
dprintk("nfsd: unable to allocate DRC entry!\n");
goto out;
}
/* /*
* We're keeping the one we just allocated. Are we now over the * We're keeping the one we just allocated. Are we now over the
* limit? Prune one off the tip of the LRU in trade for the one we * limit? Prune one off the tip of the LRU in trade for the one we
@@ -376,7 +374,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
nfsd_reply_cache_free_locked(list_first_entry(&lru_head, nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
struct svc_cacherep, c_lru)); struct svc_cacherep, c_lru));
setup_entry:
nfsdstats.rcmisses++; nfsdstats.rcmisses++;
rqstp->rq_cacherep = rp; rqstp->rq_cacherep = rp;
rp->c_state = RC_INPROG; rp->c_state = RC_INPROG;