[PATCH] sem2mutex: mm/slab.c, mm/swapfile.c

Convert mm/slab.c's cache_chain_sem to cache_chain_mutex, and mm/swapfile.c's swapon_sem to swapon_mutex.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:    Ingo Molnar, 2006-01-18 17:42:33 -08:00
Committer: Linus Torvalds
Commit:    fc0abb1451 (parent: 1743660b91)

2 changed files with 32 additions and 31 deletions
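
For context, the conversion follows the standard sem2mutex pattern: a semaphore that is only ever used as a sleeping lock (initialized to 1, with paired down()/up() calls) is replaced by the stricter mutex API from <linux/mutex.h>, which gains owner semantics and the CONFIG_DEBUG_MUTEXES checks. A minimal before/after sketch of the pattern; my_lock, shared_count and update_shared_data are made-up illustration names, not identifiers from this patch:

#include <linux/mutex.h>

/* Before: a semaphore used purely as a sleeping lock.
 *
 *	static DECLARE_MUTEX(my_lock);	(a semaphore, despite the name)
 *	init_MUTEX(&my_lock);		(runtime-init variant)
 *	down(&my_lock);  ...  up(&my_lock);
 */

/* After: a real mutex; DEFINE_MUTEX() replaces both DECLARE_MUTEX()
 * and any init_MUTEX() call, as this patch does for cache_chain_sem.
 */
static DEFINE_MUTEX(my_lock);

static int shared_count;		/* the data my_lock protects */

static void update_shared_data(void)
{
	mutex_lock(&my_lock);		/* was: down(&my_lock) */
	shared_count++;
	mutex_unlock(&my_lock);		/* was: up(&my_lock) */
}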

--- a/mm/slab.c
+++ b/mm/slab.c

@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,7 @@
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/nodemask.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -631,7 +632,7 @@ static kmem_cache_t cache_cache = {
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
@@ -857,7 +858,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_PREPARE:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		/* we need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
 		 * kmalloc_node allows us to add the slab to the right
@@ -912,7 +913,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				l3->shared = nc;
 			}
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
 		start_cpu_timer(cpu);
@@ -921,7 +922,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 	case CPU_DEAD:
 		/* fall thru */
 	case CPU_UP_CANCELED:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
@@ -973,13 +974,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			spin_unlock_irq(&cachep->spinlock);
 			kfree(nc);
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 #endif
 	}
 	return NOTIFY_OK;
 bad:
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
@@ -1047,7 +1048,6 @@ void __init kmem_cache_init(void)
 	 */
 
 	/* 1) create the cache_cache */
-	init_MUTEX(&cache_chain_sem);
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1168,10 @@ void __init kmem_cache_init(void)
 	/* 6) resize the head arrays to their final sizes */
 	{
 		kmem_cache_t *cachep;
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
 			enable_cpucache(cachep);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 	}
 
 	/* Done! */
@@ -1590,7 +1590,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG();
 	}
 
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1856,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2044,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 	lock_cpu_hotplug();
 
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		unlock_cpu_hotplug();
 		return 1;
 	}
@@ -3314,7 +3314,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3322,7 @@ static void cache_reap(void *unused)
 	struct list_head *walk;
 	struct kmem_list3 *l3;
 
-	if (down_trylock(&cache_chain_sem)) {
+	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
 		schedule_delayed_work(&__get_cpu_var(reap_work),
 				      REAPTIMEOUT_CPUC);
@@ -3393,7 +3393,7 @@ static void cache_reap(void *unused)
 		cond_resched();
 	}
 	check_irq_on();
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	drain_remote_pages();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3429,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	loff_t n = *pos;
 	struct list_head *p;
 
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 	p = cache_chain.next;
@@ -3451,7 +3451,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3603,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 		return -EINVAL;
 
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3620,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			break;
 		}
 	}
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	if (res >= 0)
 		res = count;
 	return res;
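
One subtlety in the cache_reap() hunk above deserves a note: the trylock return convention is inverted by the conversion. down_trylock() returns 0 when it takes the semaphore and non-zero when it would have to block, while mutex_trylock() returns 1 on success and 0 on contention, so the test must be negated, exactly as the patch does. A minimal sketch of the pattern (chain_mutex and try_reap are made-up illustration names):

#include <linux/mutex.h>

static DEFINE_MUTEX(chain_mutex);

static void try_reap(void)
{
	/* Semaphore era: 'if (down_trylock(&sem))' meant "lock busy,
	 * give up".  mutex_trylock() inverts the sense -- it returns
	 * 1 if the mutex was acquired, 0 if it is already locked --
	 * hence the '!' added in the patch.
	 */
	if (!mutex_trylock(&chain_mutex))
		return;			/* contended: try again later */

	/* ... reap caches while holding the mutex ... */

	mutex_unlock(&chain_mutex);
}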

--- a/mm/swapfile.c
+++ b/mm/swapfile.c

@@ -25,6 +25,7 @@
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
+#include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
@@ -46,12 +47,12 @@ struct swap_list_t swap_list = {-1, -1};
 struct swap_info_struct swap_info[MAX_SWAPFILES];
 
-static DECLARE_MUTEX(swapon_sem);
+static DEFINE_MUTEX(swapon_mutex);
 
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a semaphore.
+ * cannot be turned into a mutex.
 */
 static DECLARE_RWSEM(swap_unplug_sem);
@@ -1161,7 +1162,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	up_write(&swap_unplug_sem);
 
 	destroy_swap_extents(p);
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	drain_mmlist();
@@ -1180,7 +1181,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	p->swap_map = NULL;
 	p->flags = 0;
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
 	inode = mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
@@ -1209,7 +1210,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
 	int i;
 	loff_t l = *pos;
 
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 
 	for (i = 0; i < nr_swapfiles; i++, ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
@@ -1238,7 +1239,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 
 static void swap_stop(struct seq_file *swap, void *v)
 {
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 }
 
 static int swap_show(struct seq_file *swap, void *v)
@@ -1540,7 +1541,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		goto bad_swap;
 	}
 
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	p->flags = SWP_ACTIVE;
 	nr_swap_pages += nr_good_pages;
@@ -1566,7 +1567,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		swap_info[prev].next = p - swap_info;
 	}
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	error = 0;
 	goto out;
 bad_swap:
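
Note also what the patch deliberately does not convert: per the comment in mm/swapfile.c, bdev->unplug_fn can sleep and swap_lock must not be held across it, and swap_lock itself has to stay a spinlock since it is taken in contexts that cannot sleep. Only the outer, sleepable swapon_sem becomes a mutex, and the existing nesting (swapon lock taken first, swap_lock inside) is preserved. A rough sketch of that nesting, with adjust_swap_state and nr_swap_entries as made-up stand-ins for the real swapon/swapoff paths:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(swapon_mutex);	/* sleepable outer lock */
static DEFINE_SPINLOCK(swap_lock);	/* atomic inner lock */

static int nr_swap_entries;		/* stand-in for the swap state */

static void adjust_swap_state(int delta)
{
	mutex_lock(&swapon_mutex);	/* may sleep; nothing held yet */
	spin_lock(&swap_lock);		/* no sleeping from here on */
	nr_swap_entries += delta;
	spin_unlock(&swap_lock);
	mutex_unlock(&swapon_mutex);
}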