/*
 * Cache operations for Coda.
 * For Linux 2.1: (C) 1997 Carnegie Mellon University
 * For Linux 2.3: (C) 2000 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project http://www.coda.cs.cmu.edu/ <coda@cs.cmu.edu>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_linux.h"
#include "coda_cache.h"
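
/*
 * Cached ACL results are tagged with an epoch number.  Bumping
 * permission_epoch (coda_cache_clear_all) lazily invalidates every
 * cached entry at once; coda_cache_clear_inode ages a single inode's
 * tag instead.
 */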
static atomic_t permission_epoch = ATOMIC_INIT(0);

/* replace or extend an acl cache hit */
void coda_cache_enter(struct inode *inode, int mask)
{
	struct coda_inode_info *cii = ITOC(inode);

	spin_lock(&cii->c_lock);
	cii->c_cached_epoch = atomic_read(&permission_epoch);
	if (cii->c_uid != current_fsuid()) {
		cii->c_uid = current_fsuid();
		cii->c_cached_perm = mask;
	} else
		cii->c_cached_perm |= mask;
	spin_unlock(&cii->c_lock);
}

/* remove cached acl from an inode */
void coda_cache_clear_inode(struct inode *inode)
{
	struct coda_inode_info *cii = ITOC(inode);

	spin_lock(&cii->c_lock);
	cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
	spin_unlock(&cii->c_lock);
}

/* remove all acl caches */
void coda_cache_clear_all(struct super_block *sb)
{
	atomic_inc(&permission_epoch);
}
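
/*
 * The intended caller pattern is roughly (a sketch; the real logic lives
 * with the permission checks, typically coda_permission()):
 *
 *	if (coda_cache_check(inode, mask))
 *		return 0;
 *	error = venus_access(inode->i_sb, coda_i2f(inode), mask);
 *	if (!error)
 *		coda_cache_enter(inode, mask);
 */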

/* check if the mask has been matched against the acl already */
int coda_cache_check(struct inode *inode, int mask)
{
	struct coda_inode_info *cii = ITOC(inode);
	int hit;

	spin_lock(&cii->c_lock);
	hit = (mask & cii->c_cached_perm) == mask &&
	      cii->c_uid == current_fsuid() &&
	      cii->c_cached_epoch == atomic_read(&permission_epoch);
	spin_unlock(&cii->c_lock);

	return hit;
}

/* Purging dentries and children */
/* The following routines drop dentries which are not
   in use and flag dentries which are in use to be
   zapped later.

   The flags are detected by:
	- coda_dentry_revalidate (for lookups) if the flag is C_PURGE
	- coda_dentry_delete: to remove dentry from the cache when d_count
	  falls to zero
	- an inode method coda_revalidate (for attributes) if the
	  flag is C_VATTR
 */
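
/*
 * The flag bits end up in coda_inode_info->c_flags via coda_flag_inode();
 * the revalidation paths listed above act on them later.
 */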

/* this won't do any harm: just flag all children */
static void coda_flag_children(struct dentry *parent, int flag)
{
	struct list_head *child;
	struct dentry *de;

	spin_lock(&parent->d_lock);
	list_for_each(child, &parent->d_subdirs)
	{
		de = list_entry(child, struct dentry, d_u.d_child);
		/* don't know what to do with negative dentries */
		if (!de->d_inode)
			continue;
		coda_flag_inode(de->d_inode, flag);
	}
	spin_unlock(&parent->d_lock);
	return;
}

void coda_flag_inode_children(struct inode *inode, int flag)
{
	struct dentry *alias_de;

	if (!inode || !S_ISDIR(inode->i_mode))
		return;

	alias_de = d_find_alias(inode);
	if (!alias_de)
		return;
	coda_flag_children(alias_de, flag);
	shrink_dcache_parent(alias_de);
	dput(alias_de);
}
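
/*
 * These helpers are driven by venus downcalls; a CODA_ZAPDIR, for example,
 * is expected to be handled along the lines of (a sketch, see coda_downcall()):
 *
 *	coda_flag_inode_children(inode, C_PURGE);
 *	coda_flag_inode(inode, C_VATTR);
 */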