commit b5898cd057
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs bits and pieces from Al Viro:
 "Assorted bits that got missed in the first pull request + fixes for a
  couple of coredump regressions"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  fold try_to_ascend() into the sole remaining caller
  dcache.c: get rid of pointless macros
  take read_seqbegin_or_lock() and friends to seqlock.h
  consolidate simple ->d_delete() instances
  gfs2: endianness misannotations
  dump_emit(): use __kernel_write(), not vfs_write()
  dump_align(): fix the dumb braino
arch/ia64/kernel/perfmon.c

@@ -2166,12 +2166,6 @@ static const struct file_operations pfm_file_ops = {
 	.flush          = pfm_flush
 };

-static int
-pfmfs_delete_dentry(const struct dentry *dentry)
-{
-	return 1;
-}
-
 static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
 {
 	return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
@@ -2179,7 +2173,7 @@ static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
 }

 static const struct dentry_operations pfmfs_dentry_operations = {
-	.d_delete = pfmfs_delete_dentry,
+	.d_delete = always_delete_dentry,
 	.d_dname = pfmfs_dname,
 };
fs/9p/vfs_dentry.c

@@ -42,23 +42,6 @@
 #include "v9fs_vfs.h"
 #include "fid.h"

-/**
- * v9fs_dentry_delete - called when dentry refcount equals 0
- * @dentry: dentry in question
- *
- * By returning 1 here we should remove cacheing of unused
- * dentry components.
- *
- */
-
-static int v9fs_dentry_delete(const struct dentry *dentry)
-{
-	p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
-		 dentry->d_name.name, dentry);
-
-	return 1;
-}
-
 /**
  * v9fs_cached_dentry_delete - called when dentry refcount equals 0
  * @dentry: dentry in question
@@ -134,6 +117,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = {
 };

 const struct dentry_operations v9fs_dentry_operations = {
-	.d_delete = v9fs_dentry_delete,
+	.d_delete = always_delete_dentry,
 	.d_release = v9fs_dentry_release,
 };
fs/configfs/dentry.c

@@ -66,19 +66,9 @@ static void configfs_d_iput(struct dentry * dentry,
 	iput(inode);
 }

-/*
- * We _must_ delete our dentries on last dput, as the chain-to-parent
- * behavior is required to clear the parents of default_groups.
- */
-static int configfs_d_delete(const struct dentry *dentry)
-{
-	return 1;
-}
-
 const struct dentry_operations configfs_dentry_ops = {
 	.d_iput		= configfs_d_iput,
-	.d_delete	= configfs_d_delete,
+	/* simple_delete_dentry() isn't exported */
+	.d_delete	= always_delete_dentry,
 };

 #ifdef CONFIG_LOCKDEP
fs/coredump.c

@@ -695,7 +695,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
 	while (nr) {
 		if (dump_interrupted())
 			return 0;
-		n = vfs_write(file, addr, nr, &pos);
+		n = __kernel_write(file, addr, nr, &pos);
 		if (n <= 0)
 			return 0;
 		file->f_pos = pos;
@@ -733,7 +733,7 @@ int dump_align(struct coredump_params *cprm, int align)
 {
 	unsigned mod = cprm->written & (align - 1);
 	if (align & (align - 1))
-		return -EINVAL;
-	return mod ? dump_skip(cprm, align - mod) : 0;
+		return 0;
+	return mod ? dump_skip(cprm, align - mod) : 1;
 }
 EXPORT_SYMBOL(dump_align);
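A note on the return convention, since it is the heart of the dump_align() braino: the dump_* helpers in fs/coredump.c return 1 on success and 0 on failure, so returning 0 when the position was already aligned made callers treat a perfectly good state as an error. The sketch below is illustrative only (example_write_note() is a hypothetical helper, not part of the patch); it shows how a binfmt core-dump writer is expected to chain these helpers after the fix.

	/* Hypothetical caller, for illustration only. */
	static int example_write_note(struct coredump_params *cprm,
				      const void *payload, int len)
	{
		if (!dump_emit(cprm, payload, len))	/* 0 means the write failed */
			return 0;
		return dump_align(cprm, 4);	/* 1 on success, including the already-aligned case */
	}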
fs/dcache.c

@@ -88,35 +88,6 @@ EXPORT_SYMBOL(rename_lock);

 static struct kmem_cache *dentry_cache __read_mostly;

-/**
- * read_seqbegin_or_lock - begin a sequence number check or locking block
- * @lock: sequence lock
- * @seq : sequence number to be checked
- *
- * First try it once optimistically without taking the lock. If that fails,
- * take the lock. The sequence number is also used as a marker for deciding
- * whether to be a reader (even) or writer (odd).
- * N.B. seq must be initialized to an even number to begin with.
- */
-static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
-{
-	if (!(*seq & 1))	/* Even */
-		*seq = read_seqbegin(lock);
-	else			/* Odd */
-		read_seqlock_excl(lock);
-}
-
-static inline int need_seqretry(seqlock_t *lock, int seq)
-{
-	return !(seq & 1) && read_seqretry(lock, seq);
-}
-
-static inline void done_seqretry(seqlock_t *lock, int seq)
-{
-	if (seq & 1)
-		read_sequnlock_excl(lock);
-}
-
 /*
  * This is the single most critical data structure when it comes
  * to the dcache: the hashtable for lookups. Somebody should try
@@ -125,8 +96,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
  * This hash-function tries to avoid losing too many bits of hash
  * information, yet avoid using a prime hash-size or similar.
  */
-#define D_HASHBITS	d_hash_shift
-#define D_HASHMASK	d_hash_mask

 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
@@ -137,8 +106,8 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
 					unsigned int hash)
 {
 	hash += (unsigned long) parent / L1_CACHE_BYTES;
-	hash = hash + (hash >> D_HASHBITS);
-	return dentry_hashtable + (hash & D_HASHMASK);
+	hash = hash + (hash >> d_hash_shift);
+	return dentry_hashtable + (hash & d_hash_mask);
 }

 /* Statistics gathering. */
@@ -469,7 +438,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
 {
 	list_del(&dentry->d_u.d_child);
 	/*
-	 * Inform try_to_ascend() that we are no longer attached to the
+	 * Inform d_walk() that we are no longer attached to the
 	 * dentry tree
 	 */
 	dentry->d_flags |= DCACHE_DENTRY_KILLED;
@@ -1069,34 +1038,6 @@ void shrink_dcache_sb(struct super_block *sb)
 }
 EXPORT_SYMBOL(shrink_dcache_sb);

-/*
- * This tries to ascend one level of parenthood, but
- * we can race with renaming, so we need to re-check
- * the parenthood after dropping the lock and check
- * that the sequence number still matches.
- */
-static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
-{
-	struct dentry *new = old->d_parent;
-
-	rcu_read_lock();
-	spin_unlock(&old->d_lock);
-	spin_lock(&new->d_lock);
-
-	/*
-	 * might go back up the wrong parent if we have had a rename
-	 * or deletion
-	 */
-	if (new != old->d_parent ||
-		 (old->d_flags & DCACHE_DENTRY_KILLED) ||
-		 need_seqretry(&rename_lock, seq)) {
-		spin_unlock(&new->d_lock);
-		new = NULL;
-	}
-	rcu_read_unlock();
-	return new;
-}
-
 /**
  * enum d_walk_ret - action to talke during tree walk
  * @D_WALK_CONTINUE:	contrinue walk
@@ -1185,9 +1126,24 @@ resume:
 	 */
 	if (this_parent != parent) {
 		struct dentry *child = this_parent;
-		this_parent = try_to_ascend(this_parent, seq);
-		if (!this_parent)
+		this_parent = child->d_parent;
+
+		rcu_read_lock();
+		spin_unlock(&child->d_lock);
+		spin_lock(&this_parent->d_lock);
+
+		/*
+		 * might go back up the wrong parent if we have had a rename
+		 * or deletion
+		 */
+		if (this_parent != child->d_parent ||
+		    (child->d_flags & DCACHE_DENTRY_KILLED) ||
+		    need_seqretry(&rename_lock, seq)) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
 			goto rename_retry;
+		}
+		rcu_read_unlock();
 		next = child->d_u.d_child.next;
 		goto resume;
 	}
fs/efivarfs/super.c

@@ -83,19 +83,10 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
 	return 0;
 }

-/*
- * Retaining negative dentries for an in-memory filesystem just wastes
- * memory and lookup time: arrange for them to be deleted immediately.
- */
-static int efivarfs_delete_dentry(const struct dentry *dentry)
-{
-	return 1;
-}
-
 static struct dentry_operations efivarfs_d_ops = {
 	.d_compare = efivarfs_d_compare,
 	.d_hash = efivarfs_d_hash,
-	.d_delete = efivarfs_delete_dentry,
+	.d_delete = always_delete_dentry,
 };

 static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
fs/gfs2/lock_dlm.c

@@ -466,19 +466,19 @@ static void gdlm_cancel(struct gfs2_glock *gl)
 static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
 			     char *lvb_bits)
 {
-	uint32_t gen;
+	__le32 gen;
 	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
-	memcpy(&gen, lvb_bits, sizeof(uint32_t));
+	memcpy(&gen, lvb_bits, sizeof(__le32));
 	*lvb_gen = le32_to_cpu(gen);
 }

 static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
 			      char *lvb_bits)
 {
-	uint32_t gen;
+	__le32 gen;
 	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
 	gen = cpu_to_le32(lvb_gen);
-	memcpy(ls->ls_control_lvb, &gen, sizeof(uint32_t));
+	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
 }

 static int all_jid_bits_clear(char *lvb)
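For context on the "gfs2: endianness misannotations" part: le32_to_cpu() expects a __le32 and cpu_to_le32() produces one, so a generation counter stored in little-endian form inside the LVB must be declared __le32 rather than uint32_t for a sparse build (make C=1) to be clean. A minimal sketch of the annotation rule, using a made-up function that is not part of the patch:

	static void example_endianness(void)
	{
		__le32 wire = cpu_to_le32(42);	/* value in on-disk/on-wire byte order */
		u32 host = le32_to_cpu(wire);	/* converted back to CPU byte order */
		(void)host;
	}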
fs/gfs2/quota.c

@@ -667,7 +667,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	struct buffer_head *bh;
 	struct page *page;
 	void *kaddr, *ptr;
-	struct gfs2_quota q, *qp;
+	struct gfs2_quota q;
 	int err, nbytes;
 	u64 size;
@@ -683,28 +683,25 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 		return err;

 	err = -EIO;
-	qp = &q;
-	qp->qu_value = be64_to_cpu(qp->qu_value);
-	qp->qu_value += change;
-	qp->qu_value = cpu_to_be64(qp->qu_value);
-	qd->qd_qb.qb_value = qp->qu_value;
+	be64_add_cpu(&q.qu_value, change);
+	qd->qd_qb.qb_value = q.qu_value;
 	if (fdq) {
 		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
-			qd->qd_qb.qb_warn = qp->qu_warn;
+			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+			qd->qd_qb.qb_warn = q.qu_warn;
 		}
 		if (fdq->d_fieldmask & FS_DQ_BHARD) {
-			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
-			qd->qd_qb.qb_limit = qp->qu_limit;
+			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+			qd->qd_qb.qb_limit = q.qu_limit;
 		}
 		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
-			qd->qd_qb.qb_value = qp->qu_value;
+			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+			qd->qd_qb.qb_value = q.qu_value;
 		}
 	}

 	/* Write the quota into the quota file on disk */
-	ptr = qp;
+	ptr = &q;
 	nbytes = sizeof(struct gfs2_quota);
 get_a_page:
 	page = find_or_create_page(mapping, index, GFP_NOFS);
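The quota hunk above replaces the open-coded read-modify-write of the big-endian qu_value field with be64_add_cpu(), which adds a CPU-order delta to a __be64 in place. Roughly, as a sketch of the equivalent operation (not the kernel's actual implementation):

	static void example_be64_add(__be64 *field, u64 delta)
	{
		/* convert to CPU order, add, convert back - what be64_add_cpu() hides */
		*field = cpu_to_be64(be64_to_cpu(*field) + delta);
	}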
fs/gfs2/rgrp.c

@@ -1127,7 +1127,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
 		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
 		rgd->rd_free_clone = rgd->rd_free;
 	}
-	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
+	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
 		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
 		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
 				     rgd->rd_bits[0].bi_bh->b_data);
@@ -1161,7 +1161,7 @@ int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
 	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
 		return 0;

-	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
+	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
 		return gfs2_rgrp_bh_get(rgd);

 	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
fs/hostfs/hostfs_kern.c

@@ -33,15 +33,6 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)

 #define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))

-static int hostfs_d_delete(const struct dentry *dentry)
-{
-	return 1;
-}
-
-static const struct dentry_operations hostfs_dentry_ops = {
-	.d_delete		= hostfs_d_delete,
-};
-
 /* Changed in hostfs_args before the kernel starts running */
 static char *root_ino = "";
 static int append = 0;
@@ -925,7 +916,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = HOSTFS_SUPER_MAGIC;
 	sb->s_op = &hostfs_sbops;
-	sb->s_d_op = &hostfs_dentry_ops;
+	sb->s_d_op = &simple_dentry_operations;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;

 	/* NULL is printed as <NULL> by sprintf: avoid that. */
fs/libfs.c

@@ -47,10 +47,16 @@ EXPORT_SYMBOL(simple_statfs);
  * Retaining negative dentries for an in-memory filesystem just wastes
  * memory and lookup time: arrange for them to be deleted immediately.
  */
-static int simple_delete_dentry(const struct dentry *dentry)
+int always_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
+EXPORT_SYMBOL(always_delete_dentry);
+
+const struct dentry_operations simple_dentry_operations = {
+	.d_delete = always_delete_dentry,
+};
+EXPORT_SYMBOL(simple_dentry_operations);

 /*
  * Lookup the data. This is trivial - if the dentry didn't already
@@ -58,10 +64,6 @@ static int simple_delete_dentry(const struct dentry *dentry)
  */
 struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
 {
-	static const struct dentry_operations simple_dentry_operations = {
-		.d_delete = simple_delete_dentry,
-	};
-
 	if (dentry->d_name.len > NAME_MAX)
 		return ERR_PTR(-ENAMETOOLONG);
 	if (!dentry->d_sb->s_d_op)
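With always_delete_dentry() and simple_dentry_operations exported from fs/libfs.c and declared in include/linux/fs.h, the per-filesystem copies removed elsewhere in this merge reduce to one of two idioms. A sketch follows, using a hypothetical "examplefs"; any names below that do not appear in the diff are made up.

	/* 1) A filesystem with other dentry ops keeps its own table but points
	 *    ->d_delete at the shared helper: */
	static const struct dentry_operations examplefs_dentry_ops = {
		.d_delete = always_delete_dentry,	/* never cache unused dentries */
		/* .d_dname, .d_release, ... as the filesystem needs */
	};

	/* 2) A filesystem whose only dentry op was a "return 1" ->d_delete drops
	 *    its private table and uses the shared one at mount time: */
	static void examplefs_set_dentry_ops(struct super_block *sb)
	{
		sb->s_d_op = &simple_dentry_operations;
	}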
fs/proc/generic.c

@@ -174,22 +174,6 @@ static const struct inode_operations proc_link_inode_operations = {
 	.follow_link	= proc_follow_link,
 };

-/*
- * As some entries in /proc are volatile, we want to
- * get rid of unused dentries. This could be made
- * smarter: we could keep a "volatile" flag in the
- * inode to indicate which ones to keep.
- */
-static int proc_delete_dentry(const struct dentry * dentry)
-{
-	return 1;
-}
-
-static const struct dentry_operations proc_dentry_operations =
-{
-	.d_delete	= proc_delete_dentry,
-};
-
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
@@ -209,7 +193,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
 		inode = proc_get_inode(dir->i_sb, de);
 		if (!inode)
 			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &proc_dentry_operations);
+		d_set_d_op(dentry, &simple_dentry_operations);
 		d_add(dentry, inode);
 		return NULL;
 	}
fs/proc/namespaces.c

@@ -42,12 +42,6 @@ static const struct inode_operations ns_inode_operations = {
 	.setattr	= proc_setattr,
 };

-static int ns_delete_dentry(const struct dentry *dentry)
-{
-	/* Don't cache namespace inodes when not in use */
-	return 1;
-}
-
 static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
 {
 	struct inode *inode = dentry->d_inode;
@@ -59,7 +53,7 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)

 const struct dentry_operations ns_dentry_operations =
 {
-	.d_delete	= ns_delete_dentry,
+	.d_delete	= always_delete_dentry,
 	.d_dname	= ns_dname,
 };
include/linux/fs.h

@@ -2622,7 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
 extern int simple_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata);
+extern int always_delete_dentry(const struct dentry *);
 extern struct inode *alloc_anon_inode(struct super_block *);
+extern const struct dentry_operations simple_dentry_operations;

 extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
include/linux/seqlock.h

@@ -354,6 +354,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
 	spin_unlock(&sl->lock);
 }

+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * @lock: sequence lock
+ * @seq : sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a reader (even) or writer (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+	if (!(*seq & 1))	/* Even */
+		*seq = read_seqbegin(lock);
+	else			/* Odd */
+		read_seqlock_excl(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+	return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+	if (seq & 1)
+		read_sequnlock_excl(lock);
+}
+
 static inline void read_seqlock_excl_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
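These helpers encode a common pattern: try an optimistic, lockless seqlock read first, and fall back to taking the lock exclusively if a writer interfered. The even/odd trick in *seq records which mode the caller is in. A sketch of the intended calling pattern follows; it mirrors how fs/dcache.c uses the helpers against rename_lock, but example_reader() itself is made up.

	static void example_reader(seqlock_t *lock)
	{
		int seq = 0;			/* even: start as a lockless reader */
	again:
		read_seqbegin_or_lock(lock, &seq);
		/* ... traverse the protected data structure ... */
		if (need_seqretry(lock, seq)) {
			seq = 1;		/* odd: retry holding the lock exclusively */
			goto again;
		}
		done_seqretry(lock, seq);	/* drops the lock only in the locked case */
	}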
kernel/cgroup.c

@@ -895,11 +895,6 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 	iput(inode);
 }

-static int cgroup_delete(const struct dentry *d)
-{
-	return 1;
-}
-
 static void remove_dir(struct dentry *d)
 {
 	struct dentry *parent = dget(d->d_parent);
@@ -1486,7 +1481,7 @@ static int cgroup_get_rootdir(struct super_block *sb)
 {
 	static const struct dentry_operations cgroup_dops = {
 		.d_iput = cgroup_diput,
-		.d_delete = cgroup_delete,
+		.d_delete = always_delete_dentry,
 	};

 	struct inode *inode =
net/sunrpc/rpc_pipe.c

@@ -471,15 +471,6 @@ struct rpc_filelist {
 	umode_t mode;
 };

-static int rpc_delete_dentry(const struct dentry *dentry)
-{
-	return 1;
-}
-
-static const struct dentry_operations rpc_dentry_operations = {
-	.d_delete = rpc_delete_dentry,
-};
-
 static struct inode *
 rpc_get_inode(struct super_block *sb, umode_t mode)
 {
@@ -1266,7 +1257,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 	sb->s_magic = RPCAUTH_GSSMAGIC;
 	sb->s_op = &s_ops;
-	sb->s_d_op = &rpc_dentry_operations;
+	sb->s_d_op = &simple_dentry_operations;
 	sb->s_time_gran = 1;

 	inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);