lockd: convert nsm_mutex to a spinlock

There's no reason for a mutex here, except to allow an allocation under
the lock, which we can avoid with the usual trick of preallocating
memory for the new object and freeing it if it turns out to be
unnecessary.

Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
This commit is contained in:
J. Bruce Fields 2008-02-20 15:40:15 -05:00
parent a95e56e72c
commit d842120212
1 changed file with 19 additions and 15 deletions

View File

@ -457,7 +457,7 @@ nlm_gc_hosts(void)
* Manage NSM handles * Manage NSM handles
*/ */
static LIST_HEAD(nsm_handles); static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex); static DEFINE_SPINLOCK(nsm_lock);
static struct nsm_handle * static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin, __nsm_find(const struct sockaddr_in *sin,
@ -479,7 +479,8 @@ __nsm_find(const struct sockaddr_in *sin,
return NULL; return NULL;
} }
mutex_lock(&nsm_mutex); retry:
spin_lock(&nsm_lock);
list_for_each_entry(pos, &nsm_handles, sm_link) { list_for_each_entry(pos, &nsm_handles, sm_link) {
if (hostname && nsm_use_hostnames) { if (hostname && nsm_use_hostnames) {
@ -489,28 +490,32 @@ __nsm_find(const struct sockaddr_in *sin,
} else if (!nlm_cmp_addr(&pos->sm_addr, sin)) } else if (!nlm_cmp_addr(&pos->sm_addr, sin))
continue; continue;
atomic_inc(&pos->sm_count); atomic_inc(&pos->sm_count);
kfree(nsm);
nsm = pos; nsm = pos;
goto out; goto found;
} }
if (nsm) {
list_add(&nsm->sm_link, &nsm_handles);
goto found;
}
spin_unlock(&nsm_lock);
if (!create) { if (!create)
nsm = NULL; return NULL;
goto out;
}
nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL); nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
if (nsm == NULL) if (nsm == NULL)
goto out; return NULL;
nsm->sm_addr = *sin; nsm->sm_addr = *sin;
nsm->sm_name = (char *) (nsm + 1); nsm->sm_name = (char *) (nsm + 1);
memcpy(nsm->sm_name, hostname, hostname_len); memcpy(nsm->sm_name, hostname, hostname_len);
nsm->sm_name[hostname_len] = '\0'; nsm->sm_name[hostname_len] = '\0';
atomic_set(&nsm->sm_count, 1); atomic_set(&nsm->sm_count, 1);
goto retry;
list_add(&nsm->sm_link, &nsm_handles); found:
spin_unlock(&nsm_lock);
out:
mutex_unlock(&nsm_mutex);
return nsm; return nsm;
} }
@ -529,10 +534,9 @@ nsm_release(struct nsm_handle *nsm)
{ {
if (!nsm) if (!nsm)
return; return;
mutex_lock(&nsm_mutex); if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
if (atomic_dec_and_test(&nsm->sm_count)) {
list_del(&nsm->sm_link); list_del(&nsm->sm_link);
spin_unlock(&nsm_lock);
kfree(nsm); kfree(nsm);
} }
mutex_unlock(&nsm_mutex);
} }