2019-05-27 14:55:05 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) International Business Machines Corp., 2000-2004
|
|
|
|
* Portions Copyright (C) Christoph Hellwig, 2001-2002
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/parser.h>
|
|
|
|
#include <linux/completion.h>
|
|
|
|
#include <linux/vfs.h>
|
2008-07-25 16:46:51 +08:00
|
|
|
#include <linux/quotaops.h>
|
2005-09-07 06:16:54 +08:00
|
|
|
#include <linux/mount.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/moduleparam.h>
|
2006-02-16 02:49:04 +08:00
|
|
|
#include <linux/kthread.h>
|
2005-06-23 15:10:19 +08:00
|
|
|
#include <linux/posix_acl.h>
|
2006-07-27 03:52:13 +08:00
|
|
|
#include <linux/buffer_head.h>
|
2007-07-17 19:04:28 +08:00
|
|
|
#include <linux/exportfs.h>
|
2009-01-21 00:05:39 +08:00
|
|
|
#include <linux/crc32.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-09-07 06:16:54 +08:00
|
|
|
#include <linux/seq_file.h>
|
2012-09-18 00:58:19 +08:00
|
|
|
#include <linux/blkdev.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include "jfs_incore.h"
|
|
|
|
#include "jfs_filsys.h"
|
2005-05-05 04:29:35 +08:00
|
|
|
#include "jfs_inode.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "jfs_metapage.h"
|
|
|
|
#include "jfs_superblock.h"
|
|
|
|
#include "jfs_dmap.h"
|
|
|
|
#include "jfs_imap.h"
|
|
|
|
#include "jfs_acl.h"
|
|
|
|
#include "jfs_debug.h"
|
2013-12-20 21:16:51 +08:00
|
|
|
#include "jfs_xattr.h"
|
2017-04-11 22:21:01 +08:00
|
|
|
#include "jfs_dinode.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

/* slab cache for struct jfs_inode_info; created at module init */
static struct kmem_cache *jfs_inode_cachep;

/* operation tables defined later in this file */
static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

/* upper bound on the number of transaction-commit worker threads */
#define MAX_COMMIT_THREADS 64
/* number of commit threads to start; 0 selects a default elsewhere */
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

/* kernel threads servicing the journal: commit workers, I/O, and sync */
static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
/* runtime-tunable log verbosity, exposed via module parameter */
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
|
|
|
|
|
|
|
|
static void jfs_handle_error(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct jfs_sb_info *sbi = JFS_SBI(sb);
|
|
|
|
|
2017-07-17 15:45:34 +08:00
|
|
|
if (sb_rdonly(sb))
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
updateSuper(sb, FM_DIRTY);
|
|
|
|
|
|
|
|
if (sbi->flag & JFS_ERR_PANIC)
|
|
|
|
panic("JFS (device %s): panic forced after error\n",
|
|
|
|
sb->s_id);
|
|
|
|
else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
|
2016-03-30 20:23:16 +08:00
|
|
|
jfs_err("ERROR: (device %s): remounting filesystem as read-only",
|
2005-04-17 06:20:36 +08:00
|
|
|
sb->s_id);
|
2017-11-28 05:05:09 +08:00
|
|
|
sb->s_flags |= SB_RDONLY;
|
2006-10-02 22:55:27 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* nothing is done for continue beyond marking the superblock dirty */
|
|
|
|
}
|
|
|
|
|
2013-06-05 07:39:15 +08:00
|
|
|
void jfs_error(struct super_block *sb, const char *fmt, ...)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-06-05 07:39:15 +08:00
|
|
|
struct va_format vaf;
|
2005-04-17 06:20:36 +08:00
|
|
|
va_list args;
|
|
|
|
|
2013-06-05 07:39:15 +08:00
|
|
|
va_start(args, fmt);
|
|
|
|
|
|
|
|
vaf.fmt = fmt;
|
|
|
|
vaf.va = &args;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-03-12 11:13:54 +08:00
|
|
|
pr_err("ERROR: (device %s): %ps: %pV\n",
|
2013-06-05 07:39:15 +08:00
|
|
|
sb->s_id, __builtin_return_address(0), &vaf);
|
|
|
|
|
|
|
|
va_end(args);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
jfs_handle_error(sb);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct inode *jfs_alloc_inode(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct jfs_inode_info *jfs_inode;
|
|
|
|
|
2022-03-23 05:41:03 +08:00
|
|
|
jfs_inode = alloc_inode_sb(sb, jfs_inode_cachep, GFP_NOFS);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!jfs_inode)
|
|
|
|
return NULL;
|
2014-10-01 17:19:12 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
return &jfs_inode->vfs_inode;
|
|
|
|
}
|
|
|
|
|
2019-04-16 10:48:59 +08:00
|
|
|
static void jfs_free_inode(struct inode *inode)
|
2011-01-07 14:49:49 +08:00
|
|
|
{
|
2019-04-16 10:48:59 +08:00
|
|
|
kmem_cache_free(jfs_inode_cachep, JFS_IP(inode));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-06-23 17:02:58 +08:00
|
|
|
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-06-23 17:02:58 +08:00
|
|
|
struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
|
2005-04-17 06:20:36 +08:00
|
|
|
s64 maxinodes;
|
|
|
|
struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;
|
|
|
|
|
|
|
|
jfs_info("In jfs_statfs");
|
|
|
|
buf->f_type = JFS_SUPER_MAGIC;
|
|
|
|
buf->f_bsize = sbi->bsize;
|
|
|
|
buf->f_blocks = sbi->bmap->db_mapsize;
|
|
|
|
buf->f_bfree = sbi->bmap->db_nfree;
|
|
|
|
buf->f_bavail = sbi->bmap->db_nfree;
|
|
|
|
/*
|
|
|
|
* If we really return the number of allocated & free inodes, some
|
|
|
|
* applications will fail because they won't see enough free inodes.
|
2013-01-17 05:55:48 +08:00
|
|
|
* We'll try to calculate some guess as to how many inodes we can
|
2005-04-17 06:20:36 +08:00
|
|
|
* really allocate
|
|
|
|
*
|
|
|
|
* buf->f_files = atomic_read(&imap->im_numinos);
|
|
|
|
* buf->f_ffree = atomic_read(&imap->im_numfree);
|
|
|
|
*/
|
|
|
|
maxinodes = min((s64) atomic_read(&imap->im_numinos) +
|
|
|
|
((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
|
|
|
|
<< L2INOSPEREXT), (s64) 0xffffffffLL);
|
|
|
|
buf->f_files = maxinodes;
|
|
|
|
buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
|
|
|
|
atomic_read(&imap->im_numfree));
|
2019-01-10 21:41:53 +08:00
|
|
|
buf->f_fsid.val[0] = crc32_le(0, (char *)&sbi->uuid,
|
|
|
|
sizeof(sbi->uuid)/2);
|
|
|
|
buf->f_fsid.val[1] = crc32_le(0,
|
|
|
|
(char *)&sbi->uuid + sizeof(sbi->uuid)/2,
|
|
|
|
sizeof(sbi->uuid)/2);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
buf->f_namelen = JFS_NAME_MAX;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-11 22:21:01 +08:00
|
|
|
#ifdef CONFIG_QUOTA
/* forward declarations; the implementations live later in this file */
static int jfs_quota_off(struct super_block *sb, int type);
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
			const struct path *path);

/*
 * Disable every quota type at unmount time.  Called from
 * jfs_put_super() before the filesystem is torn down.
 */
static void jfs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		jfs_quota_off(sb, type);
}

/*
 * quotactl operations: JFS supplies its own on/off handlers (which
 * locate the quota files); everything else is the generic dquot code.
 */
static const struct quotactl_ops jfs_quotactl_ops = {
	.quota_on = jfs_quota_on,
	.quota_off = jfs_quota_off,
	.quota_sync = dquot_quota_sync,
	.get_state = dquot_get_state,
	.set_info = dquot_set_dqinfo,
	.get_dqblk = dquot_get_dqblk,
	.set_dqblk = dquot_set_dqblk,
	.get_nextdqblk = dquot_get_next_dqblk,
};
#else
/* no-op stub so jfs_put_super() can call this unconditionally */
static inline void jfs_quota_off_umount(struct super_block *sb)
{
}
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * jfs_put_super - tear down a JFS mount
 * @sb: superblock being unmounted
 *
 * Ordering matters here: quotas are turned off while the filesystem is
 * still usable, then the journal/aggregate is unmounted, and only then
 * are the NLS table, the direct-mapping inode, and the sb_info freed.
 */
static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	/* quota files live on this fs, so shut quotas down first */
	jfs_quota_off_umount(sb);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	/* unload_nls() tolerates a NULL table */
	unload_nls(sbi->nls_tab);

	/* drop cached metadata pages before releasing the direct inode */
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}
|
|
|
|
|
|
|
|
/* mount-option token ids, matched against the table below */
enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

/*
 * Option patterns for match_token().  Note that "resize=%u" must be
 * tried before the bare "resize", and "discard" coexists with the
 * value-taking "discard=%u" form.
 */
static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};
|
|
|
|
|
|
|
|
/*
 * parse_options - parse the comma-separated mount option string
 * @options:  raw option string from mount(2); modified by strsep()
 * @sb:       superblock being mounted or remounted
 * @newLVSize: out: requested new volume size in blocks (0 = no resize)
 * @flag:     in/out: JFS_* mount flag bits to update
 *
 * Returns 1 on success, 0 on any parse error (after releasing any NLS
 * table loaded along the way).  Note the sentinel convention for
 * nls_map: (void *)-1 means "no iocharset option seen, keep current",
 * while NULL means an explicit "iocharset=none".
 */
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change; NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			/* drop a table loaded by an earlier iocharset= */
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			int rc = kstrtoll(resize, 0, newLVSize);

			if (rc)
				goto cleanup;
			break;
		}
		case Opt_resize_nosize:
		{
			/* bare "resize": grow to the full device size */
			*newLVSize = sb_bdev_nr_blocks(sb);
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			/* the three policies are mutually exclusive bits */
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val;
			int rc = kstrtouint(uid, 0, &val);

			if (rc)
				goto cleanup;
			/* reject ids with no mapping in this namespace */
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}

		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val;
			int rc = kstrtouint(gid, 0, &val);

			if (rc)
				goto cleanup;
			/* reject ids with no mapping in this namespace */
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}

		case Opt_umask:
		{
			char *umask = args[0].from;
			/* umask is given in octal, hence base 8 */
			int rc = kstrtouint(umask, 8, &sbi->umask);

			if (rc)
				goto cleanup;
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}

		case Opt_discard:
			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (bdev_max_discard_sectors(sb->s_bdev))
				*flag |= JFS_DISCARD;
			else
				pr_err("JFS: discard option not supported on device\n");
			break;

		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;

		case Opt_discard_minblk:
		{
			char *minblks_trim = args[0].from;
			int rc;
			if (bdev_max_discard_sectors(sb->s_bdev)) {
				*flag |= JFS_DISCARD;
				rc = kstrtouint(minblks_trim, 0,
						&sbi->minblks_trim);
				if (rc)
					goto cleanup;
			} else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		default:
			printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	/* an iocharset option was seen: swap in the new table (or NULL) */
	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	/* release a table loaded in this call but never installed */
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * jfs_remount - handle mount -o remount
 * @sb:    superblock being remounted
 * @flags: new SB_* flags requested by the caller
 * @data:  new option string (parsed by parse_options())
 *
 * Handles, in order: online volume resize, ro->rw and rw->ro
 * transitions (with quota resume/suspend), and toggling the
 * nointegrity flag (which requires a journal restart on a rw mount).
 */
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	sync_filesystem(sb);
	if (!parse_options(data, sb, &newLVSize, &flag))
		return -EINVAL;

	if (newLVSize) {
		/* resize touches on-disk structures, so rw is required */
		if (sb_rdonly(sb)) {
			pr_err("JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
		/*
		 * Invalidate any previously read metadata. fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~SB_RDONLY;

		dquot_resume(sb, -1);
		return ret;
	}
	if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
		/* quotas must be suspended before the log goes away */
		rc = dquot_suspend(sb, -1);
		if (rc < 0)
			return rc;
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	/* toggling nointegrity on a rw mount needs a journal restart */
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!sb_rdonly(sb)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * jfs_fill_super - read the superblock and set up a JFS mount
 * @sb:     partially initialized superblock from the VFS
 * @data:   raw mount option string
 * @silent: suppress error messages when nonzero
 *
 * Allocates the per-sb info, parses options, creates the direct-mapping
 * inode used for metadata I/O, mounts the aggregate (and the journal
 * for rw mounts), and instantiates the root dentry.  Errors unwind in
 * strict reverse order through the labels at the bottom.
 */
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	/* JFS stores timestamps as unsigned 32-bit seconds */
	sb->s_time_min = 0;
	sb->s_time_max = U32_MAX;
	sbi->sb = sb;
	/* invalid uid/gid/umask mean "not overridden by mount options" */
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
	sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &jfs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_size = bdev_nr_bytes(sb->s_bdev);
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	/* private inode: mark hashed without putting it on a hash list */
	inode_fake_hash(inode);
	/* metadata paths must not recurse into filesystem reclaim */
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent)
			jfs_err("jfs_mount failed w/return code = %d", rc);
		goto out_mount_failed;
	}
	if (sb_rdonly(sb))
		sbi->log = NULL;	/* read-only mounts have no journal */
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	/* OS/2 volumes use case-insensitive dentry operations */
	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc.
	 * and page cache is indexed by long
	 */
	sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE);
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
	/* flush and invalidate metadata cached via the direct inode */
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}
|
|
|
|
|
2009-01-10 08:40:58 +08:00
|
|
|
/*
 * jfs_freeze - quiesce the filesystem for a snapshot/suspend
 * @sb: superblock being frozen
 *
 * On a rw mount: stop new transactions, shut the journal down, and mark
 * the superblock clean so the frozen image is consistent.  If the log
 * cannot be shut down, transactions are resumed and the error returned.
 */
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!sb_rdonly(sb)) {
		txQuiesce(sb);
		rc = lmLogShutdown(log);
		if (rc) {
			jfs_error(sb, "lmLogShutdown failed\n");

			/* let operations fail rather than hang */
			txResume(sb);

			return rc;
		}
		rc = updateSuper(sb, FM_CLEAN);
		if (rc) {
			jfs_err("jfs_freeze: updateSuper failed");
			/*
			 * Don't fail here. Everything succeeded except
			 * marking the superblock clean, so there's really
			 * no harm in leaving it frozen for now.
			 */
		}
	}
	return 0;
}
|
|
|
|
|
2009-01-10 08:40:58 +08:00
|
|
|
/*
 * jfs_unfreeze - thaw a frozen filesystem
 * @sb: superblock being unfrozen
 *
 * Reverses jfs_freeze(): marks the superblock mounted again and
 * restarts the journal.  Transactions are resumed even if either step
 * fails, so the filesystem does not stay wedged; the error is returned.
 */
static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!sb_rdonly(sb)) {
		rc = updateSuper(sb, FM_MOUNT);
		if (rc) {
			jfs_error(sb, "updateSuper failed\n");
			goto out;
		}
		rc = lmLogInit(log);
		if (rc)
			jfs_error(sb, "lmLogInit failed\n");
out:
		txResume(sb);
	}
	return rc;
}
|
|
|
|
|
2010-07-25 04:46:55 +08:00
|
|
|
/*
 * Mount entry point (file_system_type.mount): standard block-device
 * mount, delegating superblock setup to jfs_fill_super().
 */
static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}
|
|
|
|
|
|
|
|
/*
 * Sync the filesystem (super_operations.sync_fs): push dirty quota
 * structures into the quota file and flush the journal.  @wait selects
 * whether jfs_flush_journal() blocks for completion.
 */
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}
	return 0;
}
|
|
|
|
|
2011-12-09 10:32:45 +08:00
|
|
|
static int jfs_show_options(struct seq_file *seq, struct dentry *root)
|
2005-09-07 06:16:54 +08:00
|
|
|
{
|
2011-12-09 10:32:45 +08:00
|
|
|
struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);
|
2005-09-07 06:16:54 +08:00
|
|
|
|
2012-02-11 03:40:34 +08:00
|
|
|
if (uid_valid(sbi->uid))
|
|
|
|
seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
|
|
|
|
if (gid_valid(sbi->gid))
|
|
|
|
seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
|
2006-03-10 03:59:30 +08:00
|
|
|
if (sbi->umask != -1)
|
|
|
|
seq_printf(seq, ",umask=%03o", sbi->umask);
|
2005-09-07 06:16:54 +08:00
|
|
|
if (sbi->flag & JFS_NOINTEGRITY)
|
|
|
|
seq_puts(seq, ",nointegrity");
|
2012-09-18 00:58:19 +08:00
|
|
|
if (sbi->flag & JFS_DISCARD)
|
|
|
|
seq_printf(seq, ",discard=%u", sbi->minblks_trim);
|
2008-01-25 06:13:21 +08:00
|
|
|
if (sbi->nls_tab)
|
|
|
|
seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
|
|
|
|
if (sbi->flag & JFS_ERR_CONTINUE)
|
|
|
|
seq_printf(seq, ",errors=continue");
|
|
|
|
if (sbi->flag & JFS_ERR_PANIC)
|
|
|
|
seq_printf(seq, ",errors=panic");
|
2005-09-07 06:16:54 +08:00
|
|
|
|
2006-07-27 03:52:13 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
2005-09-07 06:16:54 +08:00
|
|
|
if (sbi->flag & JFS_USRQUOTA)
|
|
|
|
seq_puts(seq, ",usrquota");
|
|
|
|
|
|
|
|
if (sbi->flag & JFS_GRPQUOTA)
|
|
|
|
seq_puts(seq, ",grpquota");
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-07-27 03:52:13 +08:00
|
|
|
#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	/* byte offset within the first block of the read */
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	/* Clamp the request to the quota file's current size */
	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	/* Copy out one filesystem block per iteration */
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		/* Map the logical block; last arg 0 = don't allocate */
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		/* Only the first block can start mid-block */
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
|
|
|
|
|
|
|
|
/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	/* byte offset within the first block of the write */
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	/* Serialize against other users of the quota-file inode */
	inode_lock(inode);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		/* Map (and allocate if needed: last arg 1) the block */
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		/* Partial block: must read-modify-write; full block: getblk */
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	/* Nothing written at all: report the error directly */
	if (len == towrite) {
		inode_unlock(inode);
		return err;
	}
	/* Partial or full write: extend i_size and dirty the inode */
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
	inode_unlock(inode);
	return len - towrite;
}
|
|
|
|
|
2014-10-01 17:19:12 +08:00
|
|
|
/* Return the per-inode dquot pointer array (super_operations.get_dquots). */
static struct dquot **jfs_get_dquots(struct inode *inode)
{
	return JFS_IP(inode)->i_dquot;
}
|
2017-04-11 22:21:01 +08:00
|
|
|
|
|
|
|
/*
 * Enable quotas (quotactl_ops.quota_on): delegate to the generic dquot
 * code, then mark the quota file NOATIME and IMMUTABLE — both in the
 * JFS on-disk flags (mode2) and the in-core inode flags — so userspace
 * cannot tamper with it while quotas are active.
 */
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
			const struct path *path)
{
	int err;
	struct inode *inode;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);
	inode_lock(inode);
	JFS_IP(inode)->mode2 |= JFS_NOATIME_FL | JFS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
			S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	mark_inode_dirty(inode);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Disable quotas (quotactl_ops.quota_off): undo the NOATIME/IMMUTABLE
 * marking applied by jfs_quota_on().  An extra reference is taken on
 * the quota-file inode (igrab) so the flags can still be cleared after
 * dquot_quota_off() drops the quota subsystem's reference.
 */
static int jfs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	/* No quota file active (or it is going away): plain turn-off */
	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err)
		goto out_put;

	inode_lock(inode);
	JFS_IP(inode)->mode2 &= ~(JFS_NOATIME_FL | JFS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	mark_inode_dirty(inode);
out_put:
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
#endif
|
|
|
|
|
2007-02-12 16:55:41 +08:00
|
|
|
/* VFS superblock operations for JFS. */
static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.free_inode	= jfs_free_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
	.get_dquots	= jfs_get_dquots,
#endif
};
|
|
|
|
|
2007-10-22 07:42:17 +08:00
|
|
|
/* NFS export operations (file-handle <-> dentry translation). */
static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};
|
|
|
|
|
|
|
|
/* Filesystem type registration: JFS requires a backing block device. */
static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.mount		= jfs_do_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-07-26 10:45:34 +08:00
|
|
|
/*
 * Slab constructor for jfs_inode_cachep.  Runs when a slab object is
 * first created (not on every allocation), so it only sets up state
 * that survives free/realloc cycles: lists, locks, and the VFS inode.
 */
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	/* -1 = inode not currently bound to an allocation group */
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}
|
|
|
|
|
|
|
|
/*
 * Module init: create the inode slab cache, initialize the metapage
 * and transaction subsystems, start the JFS kernel threads (I/O
 * completion, lazy commit, sync), and register the filesystem.
 * On any failure, tear down in reverse order via the goto ladder.
 */
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	/*
	 * The usercopy region covers only i_inline_all (inline symlink
	 * data copied to/from userspace) for hardened usercopy checks.
	 */
	jfs_inode_cachep =
	    kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
			0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
			offsetof(struct jfs_inode_info, i_inline_all),
			sizeof_field(struct jfs_inode_info, i_inline_all),
			init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	/* Default to one commit thread per online CPU, capped */
	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			/* only stop the threads that actually started */
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

	/* register_filesystem failed: unwind everything above */
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}
|
|
|
|
|
|
|
|
/*
 * Module exit: tear down the subsystems and kernel threads started by
 * init_jfs_fs(), unregister the filesystem, then destroy the inode
 * cache after an RCU grace period.
 */
static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}
|
|
|
|
|
|
|
|
/* Module entry/exit registration. */
module_init(init_jfs_fs)
module_exit(exit_jfs_fs)