/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 *
 * Trivial changes by Alan Cox to add the LFS fixes
 *
 * Trivial Changes:
 * Rights granted to Hans Reiser to redistribute under other terms providing
 * he accepts all liability including but not limited to patent, fitness
 * for purpose, and direct or indirect claims arising from failure to perform.
 *
 * NO WARRANTY
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
#include <asm/uaccess.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/quotaops.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/crc32.h>
#include <linux/seq_file.h>

struct file_system_type reiserfs_fs_type;

static const char reiserfs_3_5_magic_string[] = REISERFS_SUPER_MAGIC_STRING;
static const char reiserfs_3_6_magic_string[] = REISER2FS_SUPER_MAGIC_STRING;
static const char reiserfs_jr_magic_string[] = REISER2FS_JR_SUPER_MAGIC_STRING;

int is_reiserfs_3_5(struct reiserfs_super_block *rs)
{
	return !strncmp(rs->s_v1.s_magic, reiserfs_3_5_magic_string,
			strlen(reiserfs_3_5_magic_string));
}

int is_reiserfs_3_6(struct reiserfs_super_block *rs)
{
	return !strncmp(rs->s_v1.s_magic, reiserfs_3_6_magic_string,
			strlen(reiserfs_3_6_magic_string));
}

int is_reiserfs_jr(struct reiserfs_super_block *rs)
{
	return !strncmp(rs->s_v1.s_magic, reiserfs_jr_magic_string,
			strlen(reiserfs_jr_magic_string));
}

static int is_any_reiserfs_magic_string(struct reiserfs_super_block *rs)
{
	return (is_reiserfs_3_5(rs) || is_reiserfs_3_6(rs) ||
		is_reiserfs_jr(rs));
}

static int reiserfs_remount(struct super_block *s, int *flags, char *data);
static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
void show_alloc_options(struct seq_file *seq, struct super_block *s);

static int reiserfs_sync_fs(struct super_block *s, int wait)
{
	struct reiserfs_transaction_handle th;

	reiserfs_write_lock(s);
	if (!journal_begin(&th, s, 1))
		if (!journal_end_sync(&th, s, 1))
			reiserfs_flush_old_commits(s);
	s->s_dirt = 0;	/* Even if it's not true.
			 * We'll loop forever in sync_supers otherwise */
	reiserfs_write_unlock(s);
	return 0;
}

static void reiserfs_write_super(struct super_block *s)
{
	reiserfs_sync_fs(s, 1);
}

static int reiserfs_freeze(struct super_block *s)
{
	struct reiserfs_transaction_handle th;
	reiserfs_write_lock(s);
	if (!(s->s_flags & MS_RDONLY)) {
		int err = journal_begin(&th, s, 1);
		if (err) {
			reiserfs_block_writes(&th);
		} else {
			reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
						     1);
			journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
			reiserfs_block_writes(&th);
			journal_end_sync(&th, s, 1);
		}
	}
	s->s_dirt = 0;
	reiserfs_write_unlock(s);
	return 0;
}

static int reiserfs_unfreeze(struct super_block *s)
{
	reiserfs_allow_writes(s);
	return 0;
}

extern const struct in_core_key MAX_IN_CORE_KEY;

/* this is used to delete a "save link" when there are no items of the
   file it points to. That can happen either if unlink completed but
   removal of the "save unlink" link did not, or if the file has both
   unlink and truncate pending and unlink completes first (because the
   key of the "save link" protecting unlink is bigger than the key of
   the "save link" which protects truncate), so no items are left on
   which to perform truncate completion */
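/* Roughly, based on add_save_link() below (an illustrative sketch, not a
 * separate on-disk format definition): a "save link" item appears to carry
 * the key
 *	k_dir_id   = MAX_KEY_OBJECTID
 *	k_objectid = objectid of the victim inode
 *	k_offset   = 1 + blocksize, type DIRECT   (pending unlink)
 *	k_offset   = 1,             type INDIRECT (pending truncate)
 * which is why finish_unfinished() can find them all by searching down
 * from the maximal key. */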
static int remove_save_link_only(struct super_block *s,
				 struct reiserfs_key *key, int oid_free)
{
	struct reiserfs_transaction_handle th;
	int err;

	/* we are going to do one balancing */
	err = journal_begin(&th, s, JOURNAL_PER_BALANCE_CNT);
	if (err)
		return err;

	reiserfs_delete_solid_item(&th, NULL, key);
	if (oid_free)
		/* removals are protected by direct items */
		reiserfs_release_objectid(&th, le32_to_cpu(key->k_objectid));

	return journal_end(&th, s, JOURNAL_PER_BALANCE_CNT);
}

#ifdef CONFIG_QUOTA
static int reiserfs_quota_on_mount(struct super_block *, int);
#endif

/* look for uncompleted unlinks and truncates and complete them */
static int finish_unfinished(struct super_block *s)
{
	INITIALIZE_PATH(path);
	struct cpu_key max_cpu_key, obj_key;
	struct reiserfs_key save_link_key, last_inode_key;
	int retval = 0;
	struct item_head *ih;
	struct buffer_head *bh;
	int item_pos;
	char *item;
	int done;
	struct inode *inode;
	int truncate;
#ifdef CONFIG_QUOTA
	int i;
	int ms_active_set;
	int quota_enabled[MAXQUOTAS];
#endif

	/* compose key to look for "save" links */
	max_cpu_key.version = KEY_FORMAT_3_5;
	max_cpu_key.on_disk_key.k_dir_id = ~0U;
	max_cpu_key.on_disk_key.k_objectid = ~0U;
	set_cpu_key_k_offset(&max_cpu_key, ~0U);
	max_cpu_key.key_length = 3;

	memset(&last_inode_key, 0, sizeof(last_inode_key));

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	if (s->s_flags & MS_ACTIVE) {
		ms_active_set = 0;
	} else {
		ms_active_set = 1;
		s->s_flags |= MS_ACTIVE;
	}
	/* Turn on quotas so that they are updated correctly */
	for (i = 0; i < MAXQUOTAS; i++) {
		quota_enabled[i] = 1;
		if (REISERFS_SB(s)->s_qf_names[i]) {
			int ret;

			if (sb_has_quota_active(s, i)) {
				quota_enabled[i] = 0;
				continue;
			}
			ret = reiserfs_quota_on_mount(s, i);
			if (ret < 0)
				reiserfs_warning(s, "reiserfs-2500",
						 "cannot turn on journaled "
						 "quota: error %d", ret);
		}
	}
#endif

	done = 0;
	REISERFS_SB(s)->s_is_unlinked_ok = 1;
	while (!retval) {
		retval = search_item(s, &max_cpu_key, &path);
		if (retval != ITEM_NOT_FOUND) {
			reiserfs_error(s, "vs-2140",
				       "search_by_key returned %d", retval);
			break;
		}

		bh = get_last_bh(&path);
		item_pos = get_item_pos(&path);
		if (item_pos != B_NR_ITEMS(bh)) {
			reiserfs_warning(s, "vs-2060",
					 "wrong position found");
			break;
		}
		item_pos--;
		ih = B_N_PITEM_HEAD(bh, item_pos);

		if (le32_to_cpu(ih->ih_key.k_dir_id) != MAX_KEY_OBJECTID)
			/* there are no "save" links anymore */
			break;

		save_link_key = ih->ih_key;
		if (is_indirect_le_ih(ih))
			truncate = 1;
		else
			truncate = 0;

		/* reiserfs_iget needs k_dirid and k_objectid only */
		item = B_I_PITEM(bh, ih);
		obj_key.on_disk_key.k_dir_id = le32_to_cpu(*(__le32 *) item);
		obj_key.on_disk_key.k_objectid =
		    le32_to_cpu(ih->ih_key.k_objectid);
		obj_key.on_disk_key.k_offset = 0;
		obj_key.on_disk_key.k_type = 0;

		pathrelse(&path);

		inode = reiserfs_iget(s, &obj_key);
		if (!inode) {
			/* the unlink almost completed, it just did not
			   manage to remove the "save" link and release
			   the objectid */
			reiserfs_warning(s, "vs-2180", "iget failed for %K",
					 &obj_key);
			retval = remove_save_link_only(s, &save_link_key, 1);
			continue;
		}

		if (!truncate && inode->i_nlink) {
			/* file is not unlinked */
			reiserfs_warning(s, "vs-2185",
					 "file %K is not unlinked",
					 &obj_key);
			retval = remove_save_link_only(s, &save_link_key, 0);
			continue;
		}
		dquot_initialize(inode);

		if (truncate && S_ISDIR(inode->i_mode)) {
			/* We got a truncate request for a dir, which is
			   impossible.  The only imaginable way is to execute
			   the unfinished truncate request, then boot into an
			   old kernel, remove the file and create a dir with
			   the same key. */
			reiserfs_warning(s, "green-2101",
					 "impossible truncate on a "
					 "directory %k. Please report",
					 INODE_PKEY(inode));
			retval = remove_save_link_only(s, &save_link_key, 0);
			truncate = 0;
			iput(inode);
			continue;
		}

		if (truncate) {
			REISERFS_I(inode)->i_flags |=
			    i_link_saved_truncate_mask;
			/* not completed truncate found. New size was
			   committed together with "save" link */
			reiserfs_info(s, "Truncating %k to %Ld ..",
				      INODE_PKEY(inode), inode->i_size);
			reiserfs_truncate_file(inode,
					       0
					       /*don't update modification time */
					       );
			retval = remove_save_link(inode, truncate);
		} else {
			REISERFS_I(inode)->i_flags |= i_link_saved_unlink_mask;
			/* not completed unlink (rmdir) found */
			reiserfs_info(s, "Removing %k..", INODE_PKEY(inode));
			if (memcmp(&last_inode_key, INODE_PKEY(inode),
					sizeof(last_inode_key))){
				last_inode_key = *INODE_PKEY(inode);
				/* removal gets completed in iput */
				retval = 0;
			} else {
				reiserfs_warning(s, "super-2189", "Dead loop "
						 "in finish_unfinished "
						 "detected, just remove "
						 "save link\n");
				retval = remove_save_link_only(s,
							&save_link_key, 0);
			}
		}

		iput(inode);
		printk("done\n");
		done++;
	}
	REISERFS_SB(s)->s_is_unlinked_ok = 0;

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sb_dqopt(s)->files[i] && quota_enabled[i])
			dquot_quota_off(s, i);
	}
	if (ms_active_set)
		/* Restore the flag back */
		s->s_flags &= ~MS_ACTIVE;
#endif
	pathrelse(&path);
	if (done)
		reiserfs_info(s, "There were %d uncompleted unlinks/truncates. "
			      "Completed\n", done);
	return retval;
}

/* to protect a file being unlinked from getting lost we "safe" link
   files being unlinked. This link will be deleted in the same
   transaction with the last item of the file. While mounting the
   filesystem we scan all these links and remove files which almost
   got lost */
void add_save_link(struct reiserfs_transaction_handle *th,
		   struct inode *inode, int truncate)
{
	INITIALIZE_PATH(path);
	int retval;
	struct cpu_key key;
	struct item_head ih;
	__le32 link;

	BUG_ON(!th->t_trans_id);

	/* file can only get one "save link" of each kind */
	RFALSE(truncate &&
	       (REISERFS_I(inode)->i_flags & i_link_saved_truncate_mask),
	       "saved link already exists for truncated inode %lx",
	       (long)inode->i_ino);
	RFALSE(!truncate &&
	       (REISERFS_I(inode)->i_flags & i_link_saved_unlink_mask),
	       "saved link already exists for unlinked inode %lx",
	       (long)inode->i_ino);

	/* setup key of "save" link */
	key.version = KEY_FORMAT_3_5;
	key.on_disk_key.k_dir_id = MAX_KEY_OBJECTID;
	key.on_disk_key.k_objectid = inode->i_ino;
	if (!truncate) {
		/* unlink, rmdir, rename */
		set_cpu_key_k_offset(&key, 1 + inode->i_sb->s_blocksize);
		set_cpu_key_k_type(&key, TYPE_DIRECT);

		/* item head of "safe" link */
		make_le_item_head(&ih, &key, key.version,
				  1 + inode->i_sb->s_blocksize, TYPE_DIRECT,
				  4 /*length */ , 0xffff /*free space */ );
	} else {
		/* truncate */
		if (S_ISDIR(inode->i_mode))
			reiserfs_warning(inode->i_sb, "green-2102",
					 "Adding a truncate savelink for "
					 "a directory %k! Please report",
					 INODE_PKEY(inode));
		set_cpu_key_k_offset(&key, 1);
		set_cpu_key_k_type(&key, TYPE_INDIRECT);

		/* item head of "safe" link */
		make_le_item_head(&ih, &key, key.version, 1, TYPE_INDIRECT,
				  4 /*length */ , 0 /*free space */ );
	}
	key.key_length = 3;

	/* look for its place in the tree */
	retval = search_item(inode->i_sb, &key, &path);
	if (retval != ITEM_NOT_FOUND) {
		if (retval != -ENOSPC)
			reiserfs_error(inode->i_sb, "vs-2100",
				       "search_by_key (%K) returned %d", &key,
				       retval);
		pathrelse(&path);
		return;
	}

	/* body of "save" link */
	link = INODE_PKEY(inode)->k_dir_id;

	/* put "save" link into tree, don't charge quota to anyone */
	retval =
	    reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link);
	if (retval) {
		if (retval != -ENOSPC)
			reiserfs_error(inode->i_sb, "vs-2120",
				       "insert_item returned %d", retval);
	} else {
		if (truncate)
			REISERFS_I(inode)->i_flags |=
			    i_link_saved_truncate_mask;
		else
			REISERFS_I(inode)->i_flags |= i_link_saved_unlink_mask;
	}
}
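
/* Usage note (inferred from the surrounding code, not a separate contract):
 * add_save_link() must be called with an already-open transaction - note the
 * BUG_ON(!th->t_trans_id) - typically from the unlink or truncate paths,
 * while remove_save_link() opens its own transaction and is called once the
 * unlink/truncate has actually completed. */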

/* this opens transaction unlike add_save_link */
int remove_save_link(struct inode *inode, int truncate)
{
	struct reiserfs_transaction_handle th;
	struct reiserfs_key key;
	int err;

	/* we are going to do one balancing only */
	err = journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
	if (err)
		return err;

	/* setup key of "save" link */
	key.k_dir_id = cpu_to_le32(MAX_KEY_OBJECTID);
	key.k_objectid = INODE_PKEY(inode)->k_objectid;
	if (!truncate) {
		/* unlink, rmdir, rename */
		set_le_key_k_offset(KEY_FORMAT_3_5, &key,
				    1 + inode->i_sb->s_blocksize);
		set_le_key_k_type(KEY_FORMAT_3_5, &key, TYPE_DIRECT);
	} else {
		/* truncate */
		set_le_key_k_offset(KEY_FORMAT_3_5, &key, 1);
		set_le_key_k_type(KEY_FORMAT_3_5, &key, TYPE_INDIRECT);
	}

	if ((truncate &&
	     (REISERFS_I(inode)->i_flags & i_link_saved_truncate_mask)) ||
	    (!truncate &&
	     (REISERFS_I(inode)->i_flags & i_link_saved_unlink_mask)))
		/* don't take quota bytes from anywhere */
		reiserfs_delete_solid_item(&th, NULL, &key);
	if (!truncate) {
		reiserfs_release_objectid(&th, inode->i_ino);
		REISERFS_I(inode)->i_flags &= ~i_link_saved_unlink_mask;
	} else
		REISERFS_I(inode)->i_flags &= ~i_link_saved_truncate_mask;

	return journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
}

static void reiserfs_kill_sb(struct super_block *s)
{
	if (REISERFS_SB(s)) {
		/*
		 * Force any pending inode evictions to occur now. Any
		 * inodes to be removed that have extended attributes
		 * associated with them need to clean them up before
		 * we can release the extended attribute root dentries.
		 * shrink_dcache_for_umount will BUG if we don't release
		 * those before it's called so ->put_super is too late.
		 */
		shrink_dcache_sb(s);

		dput(REISERFS_SB(s)->xattr_root);
		REISERFS_SB(s)->xattr_root = NULL;
		dput(REISERFS_SB(s)->priv_root);
		REISERFS_SB(s)->priv_root = NULL;
	}

	kill_block_super(s);
}

static void reiserfs_put_super(struct super_block *s)
{
	struct reiserfs_transaction_handle th;
	th.t_trans_id = 0;

	dquot_disable(s, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	reiserfs_write_lock(s);

	if (s->s_dirt)
		reiserfs_write_super(s);

	/* change file system state to current state if it was mounted with read-write permissions */
	if (!(s->s_flags & MS_RDONLY)) {
		if (!journal_begin(&th, s, 10)) {
			reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
						     1);
			set_sb_umount_state(SB_DISK_SUPER_BLOCK(s),
					    REISERFS_SB(s)->s_mount_state);
			journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
		}
	}

	/* note, journal_release checks for readonly mount, and can decide not
	** to do a journal_end
	*/
	journal_release(&th, s);

	reiserfs_free_bitmap_cache(s);

	brelse(SB_BUFFER_WITH_SB(s));

	print_statistics(s);

	if (REISERFS_SB(s)->reserved_blocks != 0) {
		reiserfs_warning(s, "green-2005", "reserved blocks left %d",
				 REISERFS_SB(s)->reserved_blocks);
	}

	reiserfs_proc_info_done(s);

	reiserfs_write_unlock(s);
	mutex_destroy(&REISERFS_SB(s)->lock);
	kfree(s->s_fs_info);
	s->s_fs_info = NULL;
}

static struct kmem_cache *reiserfs_inode_cachep;

static struct inode *reiserfs_alloc_inode(struct super_block *sb)
{
	struct reiserfs_inode_info *ei;
	ei = (struct reiserfs_inode_info *)
	    kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	atomic_set(&ei->openers, 0);
	mutex_init(&ei->tailpack);
	return &ei->vfs_inode;
}

static void reiserfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
}

static void reiserfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, reiserfs_i_callback);
}

static void init_once(void *foo)
{
	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;

	INIT_LIST_HEAD(&ei->i_prealloc_list);
	inode_init_once(&ei->vfs_inode);
}
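
/* init_once() is a slab constructor: as with any kmem_cache constructor it
 * runs when objects are first set up in a slab page, not on every
 * reiserfs_alloc_inode() call, which is why per-open state such as
 * ->openers and ->tailpack is (re)initialized in reiserfs_alloc_inode()
 * above rather than here. */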

static int init_inodecache(void)
{
	reiserfs_inode_cachep = kmem_cache_create("reiser_inode_cache",
						  sizeof(struct
							 reiserfs_inode_info),
						  0, (SLAB_RECLAIM_ACCOUNT|
						      SLAB_MEM_SPREAD),
						  init_once);
	if (reiserfs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(reiserfs_inode_cachep);
}

/* we don't mark inodes dirty, we just log them */
static void reiserfs_dirty_inode(struct inode *inode, int flags)
{
	struct reiserfs_transaction_handle th;

	int err = 0;
	int lock_depth;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		reiserfs_warning(inode->i_sb, "clm-6006",
				 "writing inode %lu on readonly FS",
				 inode->i_ino);
		return;
	}
	lock_depth = reiserfs_write_lock_once(inode->i_sb);

	/* this is really only used for atime updates, so they don't have
	** to be included in O_SYNC or fsync
	*/
	err = journal_begin(&th, inode->i_sb, 1);
	if (err)
		goto out;

	reiserfs_update_sd(&th, inode);
	journal_end(&th, inode->i_sb, 1);

out:
	reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}

static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *s = root->d_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	long opts = REISERFS_SB(s)->s_mount_opt;

	if (opts & (1 << REISERFS_LARGETAIL))
		seq_puts(seq, ",tails=on");
	else if (!(opts & (1 << REISERFS_SMALLTAIL)))
		seq_puts(seq, ",notail");
	/* tails=small is default so we don't show it */

	if (!(opts & (1 << REISERFS_BARRIER_FLUSH)))
		seq_puts(seq, ",barrier=none");
	/* barrier=flush is default so we don't show it */

	if (opts & (1 << REISERFS_ERROR_CONTINUE))
		seq_puts(seq, ",errors=continue");
	else if (opts & (1 << REISERFS_ERROR_PANIC))
		seq_puts(seq, ",errors=panic");
	/* errors=ro is default so we don't show it */

	if (opts & (1 << REISERFS_DATA_LOG))
		seq_puts(seq, ",data=journal");
	else if (opts & (1 << REISERFS_DATA_WRITEBACK))
		seq_puts(seq, ",data=writeback");
	/* data=ordered is default so we don't show it */

	if (opts & (1 << REISERFS_ATTRS))
		seq_puts(seq, ",attrs");

	if (opts & (1 << REISERFS_XATTRS_USER))
		seq_puts(seq, ",user_xattr");

	if (opts & (1 << REISERFS_EXPOSE_PRIVROOT))
		seq_puts(seq, ",expose_privroot");

	if (opts & (1 << REISERFS_POSIXACL))
		seq_puts(seq, ",acl");

	if (REISERFS_SB(s)->s_jdev)
		seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);

	if (journal->j_max_commit_age != journal->j_default_max_commit_age)
		seq_printf(seq, ",commit=%d", journal->j_max_commit_age);

#ifdef CONFIG_QUOTA
	if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
		seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
	else if (opts & (1 << REISERFS_USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
		seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
	else if (opts & (1 << REISERFS_GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (REISERFS_SB(s)->s_jquota_fmt) {
		if (REISERFS_SB(s)->s_jquota_fmt == QFMT_VFS_OLD)
			seq_puts(seq, ",jqfmt=vfsold");
		else if (REISERFS_SB(s)->s_jquota_fmt == QFMT_VFS_V0)
			seq_puts(seq, ",jqfmt=vfsv0");
	}
#endif

	/* Block allocator options */
	if (opts & (1 << REISERFS_NO_BORDER))
		seq_puts(seq, ",block-allocator=noborder");
	if (opts & (1 << REISERFS_NO_UNHASHED_RELOCATION))
		seq_puts(seq, ",block-allocator=no_unhashed_relocation");
	if (opts & (1 << REISERFS_HASHED_RELOCATION))
		seq_puts(seq, ",block-allocator=hashed_relocation");
	if (opts & (1 << REISERFS_TEST4))
		seq_puts(seq, ",block-allocator=test4");
	show_alloc_options(seq, s);
	return 0;
}
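
/* The options emitted above are appended to the per-mount line shown in
 * /proc/mounts; for instance, a filesystem mounted with
 * -o data=journal,barrier=none would typically show
 * ",data=journal,barrier=none" after the generic flags. */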

#ifdef CONFIG_QUOTA
static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
				    size_t, loff_t);
static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t,
				   loff_t);
#endif

static const struct super_operations reiserfs_sops = {
	.alloc_inode = reiserfs_alloc_inode,
	.destroy_inode = reiserfs_destroy_inode,
	.write_inode = reiserfs_write_inode,
	.dirty_inode = reiserfs_dirty_inode,
	.evict_inode = reiserfs_evict_inode,
	.put_super = reiserfs_put_super,
	.write_super = reiserfs_write_super,
	.sync_fs = reiserfs_sync_fs,
	.freeze_fs = reiserfs_freeze,
	.unfreeze_fs = reiserfs_unfreeze,
	.statfs = reiserfs_statfs,
	.remount_fs = reiserfs_remount,
	.show_options = reiserfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read = reiserfs_quota_read,
	.quota_write = reiserfs_quota_write,
#endif
};

#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")

static int reiserfs_write_dquot(struct dquot *);
static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
static int reiserfs_mark_dquot_dirty(struct dquot *);
static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, struct path *);

static const struct dquot_operations reiserfs_quota_operations = {
	.write_dquot = reiserfs_write_dquot,
	.acquire_dquot = reiserfs_acquire_dquot,
	.release_dquot = reiserfs_release_dquot,
	.mark_dirty = reiserfs_mark_dquot_dirty,
	.write_info = reiserfs_write_info,
	.alloc_dquot = dquot_alloc,
	.destroy_dquot = dquot_destroy,
};

static const struct quotactl_ops reiserfs_qctl_operations = {
	.quota_on = reiserfs_quota_on,
	.quota_off = dquot_quota_off,
	.quota_sync = dquot_quota_sync,
	.get_info = dquot_get_dqinfo,
	.set_info = dquot_set_dqinfo,
	.get_dqblk = dquot_get_dqblk,
	.set_dqblk = dquot_set_dqblk,
};
#endif

static const struct export_operations reiserfs_export_ops = {
	.encode_fh = reiserfs_encode_fh,
	.fh_to_dentry = reiserfs_fh_to_dentry,
	.fh_to_parent = reiserfs_fh_to_parent,
	.get_parent = reiserfs_get_parent,
};

/* this struct is used in reiserfs_getopt() for containing the value for
   those mount options that have values rather than being toggles. */
typedef struct {
	char *value;
	int setmask;		/* bitmask which is to set on mount_options bitmask
				   when this value is found, 0 if no bits are to be changed. */
	int clrmask;		/* bitmask which is to clear on mount_options bitmask
				   when this value is found, 0 if no bits are to be changed.
				   This is applied BEFORE setmask */
} arg_desc_t;

/* Set this bit in arg_required to allow empty arguments */
#define REISERFS_OPT_ALLOWEMPTY 31

/* this struct is used in reiserfs_getopt() for describing the set of reiserfs
   mount options */
typedef struct {
	char *option_name;
	int arg_required;	/* 0 if argument is not required, not 0 otherwise */
	const arg_desc_t *values;	/* list of values accepted by an option */
	int setmask;		/* bitmask which is to set on mount_options bitmask
				   when this value is found, 0 if no bits are to be changed. */
	int clrmask;		/* bitmask which is to clear on mount_options bitmask
				   when this value is found, 0 if no bits are to be changed.
				   This is applied BEFORE setmask */
} opt_desc_t;
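
/* Illustrative example of how these two tables combine: the "data" entry in
 * reiserfs_parse_options() below is described as
 *	{"data", .arg_required = 'd', .values = logging_mode}
 * so "-o data=journal" is matched against the logging_mode table, whose
 * "journal" entry first clears the other two data-mode bits (clrmask) and
 * then sets REISERFS_DATA_LOG (setmask); clrmask is always applied before
 * setmask. */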

/* possible values for -o data= */
static const arg_desc_t logging_mode[] = {
	{"ordered", 1 << REISERFS_DATA_ORDERED,
	 (1 << REISERFS_DATA_LOG | 1 << REISERFS_DATA_WRITEBACK)},
	{"journal", 1 << REISERFS_DATA_LOG,
	 (1 << REISERFS_DATA_ORDERED | 1 << REISERFS_DATA_WRITEBACK)},
	{"writeback", 1 << REISERFS_DATA_WRITEBACK,
	 (1 << REISERFS_DATA_ORDERED | 1 << REISERFS_DATA_LOG)},
	{.value = NULL}
};

/* possible values for -o barrier= */
static const arg_desc_t barrier_mode[] = {
	{"none", 1 << REISERFS_BARRIER_NONE, 1 << REISERFS_BARRIER_FLUSH},
	{"flush", 1 << REISERFS_BARRIER_FLUSH, 1 << REISERFS_BARRIER_NONE},
	{.value = NULL}
};

/* possible values for "-o block-allocator=" and bits which are to be set in
   s_mount_opt of reiserfs specific part of in-core super block */
static const arg_desc_t balloc[] = {
	{"noborder", 1 << REISERFS_NO_BORDER, 0},
	{"border", 0, 1 << REISERFS_NO_BORDER},
	{"no_unhashed_relocation", 1 << REISERFS_NO_UNHASHED_RELOCATION, 0},
	{"hashed_relocation", 1 << REISERFS_HASHED_RELOCATION, 0},
	{"test4", 1 << REISERFS_TEST4, 0},
	{"notest4", 0, 1 << REISERFS_TEST4},
	{NULL, 0, 0}
};

static const arg_desc_t tails[] = {
	{"on", 1 << REISERFS_LARGETAIL, 1 << REISERFS_SMALLTAIL},
	{"off", 0, (1 << REISERFS_LARGETAIL) | (1 << REISERFS_SMALLTAIL)},
	{"small", 1 << REISERFS_SMALLTAIL, 1 << REISERFS_LARGETAIL},
	{NULL, 0, 0}
};

static const arg_desc_t error_actions[] = {
	{"panic", 1 << REISERFS_ERROR_PANIC,
	 (1 << REISERFS_ERROR_RO | 1 << REISERFS_ERROR_CONTINUE)},
	{"ro-remount", 1 << REISERFS_ERROR_RO,
	 (1 << REISERFS_ERROR_PANIC | 1 << REISERFS_ERROR_CONTINUE)},
#ifdef REISERFS_JOURNAL_ERROR_ALLOWS_NO_LOG
	{"continue", 1 << REISERFS_ERROR_CONTINUE,
	 (1 << REISERFS_ERROR_PANIC | 1 << REISERFS_ERROR_RO)},
#endif
	{NULL, 0, 0},
};
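
/* Note that the "continue" entry above is compiled in only when
 * REISERFS_JOURNAL_ERROR_ALLOWS_NO_LOG is defined; otherwise it is absent
 * from the table and "-o errors=continue" is rejected by reiserfs_getopt()
 * below as a bad value for the option. */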

/* process exactly one option from the list:
   *cur - string containing the mount options
   opts - array of options which are accepted
   opt_arg - if the option is found, requires an argument and the argument
   is specified in the input, a pointer to the argument is stored here
   bit_flags - if the option requires a certain bit to be set, it is set here
   return -1 if an unknown option is found, opt->arg_required otherwise */
static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
			   char **opt_arg, unsigned long *bit_flags)
{
	char *p;
	/* foo=bar,
	   ^   ^  ^
	   |   |  +-- option_end
	   |   +-- arg_start
	   +-- option_start
	 */
	const opt_desc_t *opt;
	const arg_desc_t *arg;

	p = *cur;

	/* assume argument cannot contain commas */
	*cur = strchr(p, ',');
	if (*cur) {
		*(*cur) = '\0';
		(*cur)++;
	}

	if (!strncmp(p, "alloc=", 6)) {
		/* Ugly special case, probably we should redo options parser so that
		   it can understand several arguments for some options, also so that
		   it can fill several bitfields with option values. */
		if (reiserfs_parse_alloc_options(s, p + 6)) {
			return -1;
		} else {
			return 0;
		}
	}

	/* for every option in the list */
	for (opt = opts; opt->option_name; opt++) {
		if (!strncmp(p, opt->option_name, strlen(opt->option_name))) {
			if (bit_flags) {
				if (opt->clrmask ==
				    (1 << REISERFS_UNSUPPORTED_OPT))
					reiserfs_warning(s, "super-6500",
							 "%s not supported.\n",
							 p);
				else
					*bit_flags &= ~opt->clrmask;
				if (opt->setmask ==
				    (1 << REISERFS_UNSUPPORTED_OPT))
					reiserfs_warning(s, "super-6501",
							 "%s not supported.\n",
							 p);
				else
					*bit_flags |= opt->setmask;
			}
			break;
		}
	}
	if (!opt->option_name) {
		reiserfs_warning(s, "super-6502",
				 "unknown mount option \"%s\"", p);
		return -1;
	}

	p += strlen(opt->option_name);
	switch (*p) {
	case '=':
		if (!opt->arg_required) {
			reiserfs_warning(s, "super-6503",
					 "the option \"%s\" does not "
					 "require an argument\n",
					 opt->option_name);
			return -1;
		}
		break;

	case 0:
		if (opt->arg_required) {
			reiserfs_warning(s, "super-6504",
					 "the option \"%s\" requires an "
					 "argument\n", opt->option_name);
			return -1;
		}
		break;
	default:
		reiserfs_warning(s, "super-6505",
				 "head of option \"%s\" is only correct\n",
				 opt->option_name);
		return -1;
	}

	/* move to the argument, or to next option if argument is not required */
	p++;

	if (opt->arg_required
	    && !(opt->arg_required & (1 << REISERFS_OPT_ALLOWEMPTY))
	    && !strlen(p)) {
		/* this catches "option=," if not allowed */
		reiserfs_warning(s, "super-6506",
				 "empty argument for \"%s\"\n",
				 opt->option_name);
		return -1;
	}

	if (!opt->values) {
		/* *=NULLopt_arg contains pointer to argument */
		*opt_arg = p;
		return opt->arg_required & ~(1 << REISERFS_OPT_ALLOWEMPTY);
	}

	/* values possible for this option are listed in opt->values */
	for (arg = opt->values; arg->value; arg++) {
		if (!strcmp(p, arg->value)) {
			if (bit_flags) {
				*bit_flags &= ~arg->clrmask;
				*bit_flags |= arg->setmask;
			}
			return opt->arg_required;
		}
	}

	reiserfs_warning(s, "super-6506",
			 "bad value \"%s\" for option \"%s\"\n", p,
			 opt->option_name);
	return -1;
}
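
/* Worked example (a sketch of the flow above, not additional behaviour):
 * for an option string such as "data=writeback,notail", the first call to
 * reiserfs_getopt() writes a '\0' over the comma and advances *cur past it,
 * matches "data" in opts[], looks "writeback" up in logging_mode[] and
 * updates *bit_flags; the caller then loops, and the next call consumes
 * "notail" the same way - one option per invocation. */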

/* returns 0 if something is wrong in option string, 1 - otherwise */
static int reiserfs_parse_options(struct super_block *s, char *options,	/* string given via mount's -o */
				  unsigned long *mount_options,
				  /* after the parsing phase, contains the
				     collection of bitflags defining what
				     mount options were selected. */
				  unsigned long *blocks,	/* strtol-ed from NNN of resize=NNN */
				  char **jdev_name,
				  unsigned int *commit_max_age,
				  char **qf_names,
				  unsigned int *qfmt)
{
	int c;
	char *arg = NULL;
	char *pos;
	opt_desc_t opts[] = {
		/* Compatibility stuff, so that -o notail for old setups still work */
		{"tails",.arg_required = 't',.values = tails},
		{"notail",.clrmask =
		 (1 << REISERFS_LARGETAIL) | (1 << REISERFS_SMALLTAIL)},
		{"conv",.setmask = 1 << REISERFS_CONVERT},
		{"attrs",.setmask = 1 << REISERFS_ATTRS},
		{"noattrs",.clrmask = 1 << REISERFS_ATTRS},
		{"expose_privroot", .setmask = 1 << REISERFS_EXPOSE_PRIVROOT},
#ifdef CONFIG_REISERFS_FS_XATTR
		{"user_xattr",.setmask = 1 << REISERFS_XATTRS_USER},
		{"nouser_xattr",.clrmask = 1 << REISERFS_XATTRS_USER},
#else
		{"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT},
		{"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT},
#endif
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
		{"acl",.setmask = 1 << REISERFS_POSIXACL},
		{"noacl",.clrmask = 1 << REISERFS_POSIXACL},
#else
		{"acl",.setmask = 1 << REISERFS_UNSUPPORTED_OPT},
		{"noacl",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT},
#endif
		{.option_name = "nolog"},
		{"replayonly",.setmask = 1 << REPLAYONLY},
		{"block-allocator",.arg_required = 'a',.values = balloc},
		{"data",.arg_required = 'd',.values = logging_mode},
		{"barrier",.arg_required = 'b',.values = barrier_mode},
		{"resize",.arg_required = 'r',.values = NULL},
		{"jdev",.arg_required = 'j',.values = NULL},
		{"nolargeio",.arg_required = 'w',.values = NULL},
		{"commit",.arg_required = 'c',.values = NULL},
		{"usrquota",.setmask = 1 << REISERFS_USRQUOTA},
		{"grpquota",.setmask = 1 << REISERFS_GRPQUOTA},
		{"noquota",.clrmask = 1 << REISERFS_USRQUOTA | 1 << REISERFS_GRPQUOTA},
		{"errors",.arg_required = 'e',.values = error_actions},
		{"usrjquota",.arg_required =
		 'u' | (1 << REISERFS_OPT_ALLOWEMPTY),.values = NULL},
		{"grpjquota",.arg_required =
		 'g' | (1 << REISERFS_OPT_ALLOWEMPTY),.values = NULL},
		{"jqfmt",.arg_required = 'f',.values = NULL},
		{.option_name = NULL}
	};
|
|
|
|
|
|
|
|
*blocks = 0;
|
|
|
|
if (!options || !*options)
|
|
|
|
/* use default configuration: create tails, journaling on, no
|
|
|
|
conversion to newest format */
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
for (pos = options; pos;) {
|
|
|
|
c = reiserfs_getopt(s, &pos, opts, &arg, mount_options);
|
|
|
|
if (c == -1)
|
|
|
|
/* wrong option is given */
|
2005-05-01 23:59:05 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (c == 'r') {
|
|
|
|
char *p;
|
|
|
|
|
|
|
|
p = NULL;
|
|
|
|
/* "resize=NNN" or "resize=auto" */
|
|
|
|
|
|
|
|
if (!strcmp(arg, "auto")) {
|
|
|
|
/* From JFS code, to auto-get the size. */
|
|
|
|
*blocks =
|
|
|
|
s->s_bdev->bd_inode->i_size >> s->
|
|
|
|
s_blocksize_bits;
|
|
|
|
} else {
|
|
|
|
*blocks = simple_strtoul(arg, &p, 0);
|
|
|
|
if (*p != '\0') {
|
|
|
|
/* NNN does not look like a number */
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6507",
|
|
|
|
"bad value %s for "
|
|
|
|
"-oresize\n", arg);
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
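		/* Illustrative use (hypothetical device and mount point):
		 *   mount -o remount,resize=auto /dev/sdb1 /mnt     - grow to fill the device
		 *   mount -o remount,resize=8388608 /dev/sdb1 /mnt  - resize to that many blocks
		 */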
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (c == 'c') {
|
|
|
|
char *p = NULL;
|
|
|
|
unsigned long val = simple_strtoul(arg, &p, 0);
|
|
|
|
/* commit=NNN (time in seconds) */
|
|
|
|
if (*p != '\0' || val >= (unsigned int)-1) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6508",
|
|
|
|
"bad value %s for -ocommit\n",
|
2005-07-13 11:21:28 +08:00
|
|
|
arg);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
*commit_max_age = (unsigned int)val;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
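		/* e.g. "-o commit=30" caps the journal commit age at 30 seconds,
		 * while "commit=0" asks for the journal defaults to be restored
		 * (see the commit_max_age handling in reiserfs_remount below). */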
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (c == 'w') {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6509", "nolargeio option "
|
|
|
|
"is no longer supported");
|
2006-09-27 16:50:50 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (c == 'j') {
|
|
|
|
if (arg && *arg && jdev_name) {
|
|
|
|
if (*jdev_name) { //Hm, already assigned?
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6510",
|
|
|
|
"journal device was "
|
|
|
|
"already specified to "
|
|
|
|
"be %s", *jdev_name);
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
*jdev_name = arg;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
if (c == 'u' || c == 'g') {
|
|
|
|
int qtype = c == 'u' ? USRQUOTA : GRPQUOTA;
|
|
|
|
|
2008-08-21 00:16:36 +08:00
|
|
|
if (sb_any_quota_loaded(s) &&
|
2008-07-25 16:46:38 +08:00
|
|
|
(!*arg != !REISERFS_SB(s)->s_qf_names[qtype])) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6511",
|
|
|
|
"cannot change journaled "
|
|
|
|
"quota options when quota "
|
|
|
|
"turned on.");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (*arg) { /* Some filename specified? */
|
|
|
|
if (REISERFS_SB(s)->s_qf_names[qtype]
|
|
|
|
&& strcmp(REISERFS_SB(s)->s_qf_names[qtype],
|
|
|
|
arg)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6512",
|
|
|
|
"%s quota file "
|
|
|
|
"already specified.",
|
2005-07-13 11:21:28 +08:00
|
|
|
QTYPE2NAME(qtype));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (strchr(arg, '/')) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6513",
|
|
|
|
"quotafile must be "
|
|
|
|
"on filesystem root.");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2008-07-25 16:46:38 +08:00
|
|
|
qf_names[qtype] =
|
2005-07-13 11:21:28 +08:00
|
|
|
kmalloc(strlen(arg) + 1, GFP_KERNEL);
|
2008-07-25 16:46:38 +08:00
|
|
|
if (!qf_names[qtype]) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2502",
|
|
|
|
"not enough memory "
|
|
|
|
"for storing "
|
|
|
|
"quotafile name.");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2008-07-25 16:46:38 +08:00
|
|
|
strcpy(qf_names[qtype], arg);
|
2011-12-22 03:17:10 +08:00
|
|
|
if (qtype == USRQUOTA)
|
|
|
|
*mount_options |= 1 << REISERFS_USRQUOTA;
|
|
|
|
else
|
|
|
|
*mount_options |= 1 << REISERFS_GRPQUOTA;
|
2005-07-13 11:21:28 +08:00
|
|
|
} else {
|
2008-07-25 16:46:38 +08:00
|
|
|
if (qf_names[qtype] !=
|
|
|
|
REISERFS_SB(s)->s_qf_names[qtype])
|
|
|
|
kfree(qf_names[qtype]);
|
|
|
|
qf_names[qtype] = NULL;
|
2011-12-22 03:17:10 +08:00
|
|
|
if (qtype == USRQUOTA)
|
|
|
|
*mount_options &= ~(1 << REISERFS_USRQUOTA);
|
|
|
|
else
|
|
|
|
*mount_options &= ~(1 << REISERFS_GRPQUOTA);
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
if (c == 'f') {
|
|
|
|
if (!strcmp(arg, "vfsold"))
|
2008-07-25 16:46:38 +08:00
|
|
|
*qfmt = QFMT_VFS_OLD;
|
2005-07-13 11:21:28 +08:00
|
|
|
else if (!strcmp(arg, "vfsv0"))
|
2008-07-25 16:46:38 +08:00
|
|
|
*qfmt = QFMT_VFS_V0;
|
2005-07-13 11:21:28 +08:00
|
|
|
else {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6514",
|
|
|
|
"unknown quota format "
|
|
|
|
"specified.");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2008-08-21 00:16:36 +08:00
|
|
|
if (sb_any_quota_loaded(s) &&
|
2008-07-25 16:46:38 +08:00
|
|
|
*qfmt != REISERFS_SB(s)->s_jquota_fmt) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6515",
|
|
|
|
"cannot change journaled "
|
|
|
|
"quota options when quota "
|
|
|
|
"turned on.");
|
2008-07-25 16:46:38 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
#else
|
|
|
|
if (c == 'u' || c == 'g' || c == 'f') {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2503", "journaled "
|
|
|
|
"quota options not supported.");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_QUOTA
|
2008-07-25 16:46:38 +08:00
|
|
|
if (!REISERFS_SB(s)->s_jquota_fmt && !*qfmt
|
|
|
|
&& (qf_names[USRQUOTA] || qf_names[GRPQUOTA])) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6515",
|
|
|
|
"journaled quota format not specified.");
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2011-12-22 03:17:10 +08:00
|
|
|
if ((!(*mount_options & (1 << REISERFS_USRQUOTA)) &&
|
|
|
|
sb_has_quota_loaded(s, USRQUOTA)) ||
|
|
|
|
(!(*mount_options & (1 << REISERFS_GRPQUOTA)) &&
|
|
|
|
sb_has_quota_loaded(s, GRPQUOTA))) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6516", "quota options must "
|
|
|
|
"be present when quota is turned on.");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
#endif
|
2005-06-24 13:01:06 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
return 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
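/* A sketch of an option string the parser above would accept (illustrative
 * values only): "acl,user_xattr,data=ordered,barrier=flush,commit=30".
 * The function returns 1 on success and 0 as soon as an option is rejected. */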
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static void switch_data_mode(struct super_block *s, unsigned long mode)
|
|
|
|
{
|
|
|
|
REISERFS_SB(s)->s_mount_opt &= ~((1 << REISERFS_DATA_LOG) |
|
|
|
|
(1 << REISERFS_DATA_ORDERED) |
|
|
|
|
(1 << REISERFS_DATA_WRITEBACK));
|
|
|
|
REISERFS_SB(s)->s_mount_opt |= (1 << mode);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_data_mode(struct super_block *s, unsigned long mount_options)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
if (mount_options & (1 << REISERFS_DATA_LOG)) {
|
|
|
|
if (!reiserfs_data_log(s)) {
|
|
|
|
switch_data_mode(s, REISERFS_DATA_LOG);
|
|
|
|
reiserfs_info(s, "switching to journaled data mode\n");
|
|
|
|
}
|
|
|
|
} else if (mount_options & (1 << REISERFS_DATA_ORDERED)) {
|
|
|
|
if (!reiserfs_data_ordered(s)) {
|
|
|
|
switch_data_mode(s, REISERFS_DATA_ORDERED);
|
|
|
|
reiserfs_info(s, "switching to ordered data mode\n");
|
|
|
|
}
|
|
|
|
} else if (mount_options & (1 << REISERFS_DATA_WRITEBACK)) {
|
|
|
|
if (!reiserfs_data_writeback(s)) {
|
|
|
|
switch_data_mode(s, REISERFS_DATA_WRITEBACK);
|
|
|
|
reiserfs_info(s, "switching to writeback data mode\n");
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static void handle_barrier_mode(struct super_block *s, unsigned long bits)
|
|
|
|
{
|
|
|
|
int flush = (1 << REISERFS_BARRIER_FLUSH);
|
|
|
|
int none = (1 << REISERFS_BARRIER_NONE);
|
|
|
|
int all_barrier = flush | none;
|
|
|
|
|
|
|
|
if (bits & all_barrier) {
|
|
|
|
REISERFS_SB(s)->s_mount_opt &= ~all_barrier;
|
|
|
|
if (bits & flush) {
|
|
|
|
REISERFS_SB(s)->s_mount_opt |= flush;
|
|
|
|
printk("reiserfs: enabling write barrier flush mode\n");
|
|
|
|
} else if (bits & none) {
|
|
|
|
REISERFS_SB(s)->s_mount_opt |= none;
|
|
|
|
printk("reiserfs: write barriers turned off\n");
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static void handle_attrs(struct super_block *s)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (reiserfs_attrs(s)) {
|
|
|
|
if (old_format_only(s)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6517", "cannot support "
|
|
|
|
"attributes on 3.5.x disk format");
|
2005-07-13 11:21:28 +08:00
|
|
|
REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS);
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
if (!(le32_to_cpu(rs->s_flags) & reiserfs_attrs_cleared)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6518", "cannot support "
|
|
|
|
"attributes until flag is set in "
|
|
|
|
"super-block");
|
2005-07-13 11:21:28 +08:00
|
|
|
REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-25 16:46:38 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
static void handle_quota_files(struct super_block *s, char **qf_names,
|
|
|
|
unsigned int *qfmt)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++) {
|
|
|
|
if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i])
|
|
|
|
kfree(REISERFS_SB(s)->s_qf_names[i]);
|
|
|
|
REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
|
|
|
|
}
|
2011-12-22 00:35:34 +08:00
|
|
|
if (*qfmt)
|
|
|
|
REISERFS_SB(s)->s_jquota_fmt = *qfmt;
|
2008-07-25 16:46:38 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct reiserfs_super_block *rs;
|
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
unsigned long blocks;
|
|
|
|
unsigned long mount_options = REISERFS_SB(s)->s_mount_opt;
|
|
|
|
unsigned long safe_mask = 0;
|
|
|
|
unsigned int commit_max_age = (unsigned int)-1;
|
|
|
|
struct reiserfs_journal *journal = SB_JOURNAL(s);
|
2008-02-08 20:21:47 +08:00
|
|
|
char *new_opts = kstrdup(arg, GFP_KERNEL);
|
2005-07-13 11:21:28 +08:00
|
|
|
int err;
|
2008-07-25 16:46:38 +08:00
|
|
|
char *qf_names[MAXQUOTAS];
|
|
|
|
unsigned int qfmt = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
2005-07-13 11:21:28 +08:00
|
|
|
int i;
|
reiserfs: kill-the-BKL
This patch is an attempt to remove the Bkl based locking scheme from
reiserfs and is intended.
It is a bit inspired from an old attempt by Peter Zijlstra:
http://lkml.indiana.edu/hypermail/linux/kernel/0704.2/2174.html
The bkl is heavily used in this filesystem to prevent from
concurrent write accesses on the filesystem.
Reiserfs makes a deep use of the specific properties of the Bkl:
- It can be acquired recursively by the same task
- It is released on the schedule() calls and reacquired when schedule() returns
The two properties above are a roadmap for the reiserfs write locking so it's
very hard to simply replace it with a common mutex.
- We need a recursive-able locking unless we want to restructure several blocks
of the code.
- We need to identify the sites where the bkl was implicitly relaxed
(schedule, wait, sync, etc...) so that we can in turn release and
reacquire our new lock explicitly.
Such implicit releases of the lock are often required to let other
resources producer/consumer do their job or we can suffer unexpected
starvations or deadlocks.
So the new lock that replaces the bkl here is a per superblock mutex with a
specific property: it can be acquired recursively by a same task, like the
bkl.
For such purpose, we integrate a lock owner and a lock depth field on the
superblock information structure.
The first axis on this patch is to turn reiserfs_write_(un)lock() function
into a wrapper to manage this mutex. Also some explicit calls to
lock_kernel() have been converted to reiserfs_write_lock() helpers.
The second axis is to find the important blocking sites (schedule...(),
wait_on_buffer(), sync_dirty_buffer(), etc...) and then apply an explicit
release of the write lock on these locations before blocking. Then we can
safely wait for those who can give us resources or those who need some.
Typically this is a fight between the current writer, the reiserfs workqueue
(aka the async committer) and the pdflush threads.
The third axis is a consequence of the second. The write lock is usually
on top of a lock dependency chain which can include the journal lock, the
flush lock or the commit lock. So it's dangerous to release and trying to
reacquire the write lock while we still hold other locks.
This is fine with the bkl:
T1 T2
lock_kernel()
mutex_lock(A)
unlock_kernel()
// do something
lock_kernel()
mutex_lock(A) -> already locked by T1
schedule() (and then unlock_kernel())
lock_kernel()
mutex_unlock(A)
....
This is not fine with a mutex:
T1 T2
mutex_lock(write)
mutex_lock(A)
mutex_unlock(write)
// do something
mutex_lock(write)
mutex_lock(A) -> already locked by T1
schedule()
mutex_lock(write) -> already locked by T2
deadlock
The solution in this patch is to provide a helper which releases the write
lock and sleep a bit if we can't lock a mutex that depends on it. It's another
simulation of the bkl behaviour.
The last axis is to locate the fs callbacks that are called with the bkl held,
according to Documentation/filesystem/Locking.
Those are:
- reiserfs_remount
- reiserfs_fill_super
- reiserfs_put_super
Reiserfs didn't need to explicitly lock because of the context of these callbacks.
But now we must take care of that with the new locking.
After this patch, reiserfs suffers from a slight performance regression (for now).
On UP, a high volume write with dd reports an average of 27 MB/s instead
of 30 MB/s without the patch applied.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Cc: Jeff Mahoney <jeffm@suse.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Bron Gondwana <brong@fastmail.fm>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
LKML-Reference: <1239070789-13354-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-04-07 10:19:49 +08:00
|
|
|
#endif
|
2008-07-25 16:46:38 +08:00
|
|
|
|
2009-04-07 10:19:49 +08:00
|
|
|
reiserfs_write_lock(s);
|
2008-07-25 16:46:38 +08:00
|
|
|
|
2009-04-07 10:19:49 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
2008-07-25 16:46:38 +08:00
|
|
|
memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names));
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
rs = SB_DISK_SUPER_BLOCK(s);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (!reiserfs_parse_options
|
2008-07-25 16:46:38 +08:00
|
|
|
(s, arg, &mount_options, &blocks, NULL, &commit_max_age,
|
|
|
|
qf_names, &qfmt)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
2008-07-25 16:46:38 +08:00
|
|
|
for (i = 0; i < MAXQUOTAS; i++)
|
|
|
|
if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i])
|
|
|
|
kfree(qf_names[i]);
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
2008-02-08 20:21:47 +08:00
|
|
|
err = -EINVAL;
|
|
|
|
goto out_err;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2008-07-25 16:46:38 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
handle_quota_files(s, qf_names, &qfmt);
|
|
|
|
#endif
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
handle_attrs(s);
|
|
|
|
|
|
|
|
/* Add options that are safe here */
|
|
|
|
safe_mask |= 1 << REISERFS_SMALLTAIL;
|
|
|
|
safe_mask |= 1 << REISERFS_LARGETAIL;
|
|
|
|
safe_mask |= 1 << REISERFS_NO_BORDER;
|
|
|
|
safe_mask |= 1 << REISERFS_NO_UNHASHED_RELOCATION;
|
|
|
|
safe_mask |= 1 << REISERFS_HASHED_RELOCATION;
|
|
|
|
safe_mask |= 1 << REISERFS_TEST4;
|
|
|
|
safe_mask |= 1 << REISERFS_ATTRS;
|
|
|
|
safe_mask |= 1 << REISERFS_XATTRS_USER;
|
|
|
|
safe_mask |= 1 << REISERFS_POSIXACL;
|
|
|
|
safe_mask |= 1 << REISERFS_BARRIER_FLUSH;
|
|
|
|
safe_mask |= 1 << REISERFS_BARRIER_NONE;
|
|
|
|
safe_mask |= 1 << REISERFS_ERROR_RO;
|
|
|
|
safe_mask |= 1 << REISERFS_ERROR_CONTINUE;
|
|
|
|
safe_mask |= 1 << REISERFS_ERROR_PANIC;
|
2011-12-22 03:17:10 +08:00
|
|
|
safe_mask |= 1 << REISERFS_USRQUOTA;
|
|
|
|
safe_mask |= 1 << REISERFS_GRPQUOTA;
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
/* Update the bitmask, taking care to keep
|
|
|
|
* the bits we're not allowed to change here */
|
|
|
|
	REISERFS_SB(s)->s_mount_opt =
	    (REISERFS_SB(s)->s_mount_opt & ~safe_mask) |
	    (mount_options & safe_mask);
|
|
|
|
|
|
|
|
if (commit_max_age != 0 && commit_max_age != (unsigned int)-1) {
|
|
|
|
journal->j_max_commit_age = commit_max_age;
|
|
|
|
journal->j_max_trans_age = commit_max_age;
|
|
|
|
} else if (commit_max_age == 0) {
|
|
|
|
/* 0 means restore defaults. */
|
|
|
|
journal->j_max_commit_age = journal->j_default_max_commit_age;
|
|
|
|
journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
|
|
|
|
}
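	/* commit_max_age is left at (unsigned int)-1 when no commit= option was
	 * given, so neither branch above touches the journal ages in that case. */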
|
|
|
|
|
|
|
|
if (blocks) {
|
2008-02-08 20:21:47 +08:00
|
|
|
err = reiserfs_resize(s, blocks);
|
|
|
|
if (err != 0)
|
|
|
|
goto out_err;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (*mount_flags & MS_RDONLY) {
|
|
|
|
reiserfs_xattr_init(s, *mount_flags);
|
|
|
|
/* remount read-only */
|
|
|
|
if (s->s_flags & MS_RDONLY)
|
|
|
|
/* it is read-only already */
|
2008-02-08 20:21:47 +08:00
|
|
|
goto out_ok;
|
2010-05-19 19:16:40 +08:00
|
|
|
|
2010-05-19 19:16:41 +08:00
|
|
|
err = dquot_suspend(s, -1);
|
|
|
|
if (err < 0)
|
2010-05-19 19:16:40 +08:00
|
|
|
goto out_err;
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
/* try to remount file system with read-only permissions */
|
|
|
|
if (sb_umount_state(rs) == REISERFS_VALID_FS
|
|
|
|
|| REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
|
2008-02-08 20:21:47 +08:00
|
|
|
goto out_ok;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
err = journal_begin(&th, s, 10);
|
|
|
|
if (err)
|
2008-02-08 20:21:47 +08:00
|
|
|
goto out_err;
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
/* Mounting a rw partition read-only. */
|
|
|
|
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
|
|
|
|
set_sb_umount_state(rs, REISERFS_SB(s)->s_mount_state);
|
|
|
|
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
|
|
|
|
} else {
|
|
|
|
/* remount read-write */
|
|
|
|
if (!(s->s_flags & MS_RDONLY)) {
|
|
|
|
reiserfs_xattr_init(s, *mount_flags);
|
2008-02-08 20:21:47 +08:00
|
|
|
goto out_ok; /* We are read-write already */
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
|
|
|
|
2008-02-08 20:21:47 +08:00
|
|
|
if (reiserfs_is_journal_aborted(journal)) {
|
|
|
|
err = journal->j_errno;
|
|
|
|
goto out_err;
|
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
handle_data_mode(s, mount_options);
|
|
|
|
handle_barrier_mode(s, mount_options);
|
|
|
|
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
|
|
|
|
s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
|
|
|
|
err = journal_begin(&th, s, 10);
|
|
|
|
if (err)
|
2008-02-08 20:21:47 +08:00
|
|
|
goto out_err;
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
/* Remount a partition that is currently read-only as read-write */
|
|
|
|
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
|
|
|
|
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
|
|
|
|
s->s_flags &= ~MS_RDONLY;
|
|
|
|
set_sb_umount_state(rs, REISERFS_ERROR_FS);
|
2009-03-31 02:02:16 +08:00
|
|
|
if (!old_format_only(s))
|
|
|
|
set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
|
2005-07-13 11:21:28 +08:00
|
|
|
/* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
|
|
|
|
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
|
|
|
|
REISERFS_SB(s)->s_mount_state = REISERFS_VALID_FS;
|
|
|
|
}
|
|
|
|
/* this will force a full flush of all journal lists */
|
|
|
|
SB_JOURNAL(s)->j_must_wait = 1;
|
|
|
|
err = journal_end(&th, s, 10);
|
|
|
|
if (err)
|
2008-02-08 20:21:47 +08:00
|
|
|
goto out_err;
|
2005-07-13 11:21:28 +08:00
|
|
|
s->s_dirt = 0;
|
|
|
|
|
|
|
|
if (!(*mount_flags & MS_RDONLY)) {
|
2010-05-19 19:16:41 +08:00
|
|
|
dquot_resume(s, -1);
|
2005-07-13 11:21:28 +08:00
|
|
|
finish_unfinished(s);
|
|
|
|
reiserfs_xattr_init(s, *mount_flags);
|
|
|
|
}
|
|
|
|
|
2008-02-08 20:21:47 +08:00
|
|
|
out_ok:
|
2009-05-09 04:05:57 +08:00
|
|
|
replace_mount_options(s, new_opts);
|
2009-04-07 10:19:49 +08:00
|
|
|
reiserfs_write_unlock(s);
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
2008-02-08 20:21:47 +08:00
|
|
|
|
|
|
|
out_err:
|
|
|
|
kfree(new_opts);
|
2009-04-07 10:19:49 +08:00
|
|
|
reiserfs_write_unlock(s);
|
2008-02-08 20:21:47 +08:00
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static int read_super_block(struct super_block *s, int offset)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct buffer_head *bh;
|
|
|
|
struct reiserfs_super_block *rs;
|
|
|
|
int fs_blocksize;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
bh = sb_bread(s, offset / s->s_blocksize);
|
|
|
|
if (!bh) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "sh-2006",
|
2005-07-13 11:21:28 +08:00
|
|
|
"bread failed (dev %s, block %lu, size %lu)",
|
|
|
|
reiserfs_bdevname(s), offset / s->s_blocksize,
|
|
|
|
s->s_blocksize);
|
|
|
|
return 1;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
rs = (struct reiserfs_super_block *)bh->b_data;
|
|
|
|
if (!is_any_reiserfs_magic_string(rs)) {
|
|
|
|
brelse(bh);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
//
|
|
|
|
// ok, reiserfs signature (old or new) found at the given offset
|
2009-03-31 02:02:44 +08:00
|
|
|
//
|
2005-07-13 11:21:28 +08:00
|
|
|
fs_blocksize = sb_blocksize(rs);
|
|
|
|
brelse(bh);
|
|
|
|
sb_set_blocksize(s, fs_blocksize);
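	/* re-read the super block with the block size recorded on disk; the
	 * first read above may have used a different (default) block size */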
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
bh = sb_bread(s, offset / s->s_blocksize);
|
|
|
|
if (!bh) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "sh-2007",
|
|
|
|
"bread failed (dev %s, block %lu, size %lu)",
|
2005-07-13 11:21:28 +08:00
|
|
|
reiserfs_bdevname(s), offset / s->s_blocksize,
|
|
|
|
s->s_blocksize);
|
|
|
|
return 1;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
rs = (struct reiserfs_super_block *)bh->b_data;
|
|
|
|
if (sb_blocksize(rs) != s->s_blocksize) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "sh-2011", "can't find a reiserfs "
|
|
|
|
"filesystem on (dev %s, block %Lu, size %lu)",
|
2005-07-13 11:21:28 +08:00
|
|
|
reiserfs_bdevname(s),
|
|
|
|
(unsigned long long)bh->b_blocknr,
|
|
|
|
s->s_blocksize);
|
|
|
|
brelse(bh);
|
|
|
|
return 1;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (rs->s_v1.s_root_block == cpu_to_le32(-1)) {
|
|
|
|
brelse(bh);
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-6519", "Unfinished reiserfsck "
|
|
|
|
"--rebuild-tree run detected. Please run\n"
|
|
|
|
"reiserfsck --rebuild-tree and wait for a "
|
|
|
|
"completion. If that fails\n"
|
2005-07-13 11:21:28 +08:00
|
|
|
"get newer reiserfsprogs package");
|
|
|
|
return 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
SB_BUFFER_WITH_SB(s) = bh;
|
|
|
|
SB_DISK_SUPER_BLOCK(s) = rs;
|
|
|
|
|
|
|
|
if (is_reiserfs_jr(rs)) {
|
|
|
|
/* the magic indicates a filesystem with a non-standard journal; look at s_version to
|
|
|
|
find which format is in use */
|
|
|
|
if (sb_version(rs) == REISERFS_VERSION_2)
|
2009-03-31 02:02:20 +08:00
|
|
|
reiserfs_info(s, "found reiserfs format \"3.6\""
|
|
|
|
" with non-standard journal\n");
|
2005-07-13 11:21:28 +08:00
|
|
|
else if (sb_version(rs) == REISERFS_VERSION_1)
|
2009-03-31 02:02:20 +08:00
|
|
|
reiserfs_info(s, "found reiserfs format \"3.5\""
|
|
|
|
" with non-standard journal\n");
|
2005-07-13 11:21:28 +08:00
|
|
|
else {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "sh-2012", "found unknown "
|
|
|
|
"format \"%u\" of reiserfs with "
|
|
|
|
"non-standard magic", sb_version(rs));
|
2005-07-13 11:21:28 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
/* s_version of standard format may contain incorrect information,
|
|
|
|
so we just look at the magic string */
|
|
|
|
reiserfs_info(s,
|
|
|
|
"found reiserfs format \"%s\" with standard journal\n",
|
|
|
|
is_reiserfs_3_5(rs) ? "3.5" : "3.6");
|
|
|
|
|
|
|
|
s->s_op = &reiserfs_sops;
|
|
|
|
s->s_export_op = &reiserfs_export_ops;
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
2005-07-13 11:21:28 +08:00
|
|
|
s->s_qcop = &reiserfs_qctl_operations;
|
|
|
|
s->dq_op = &reiserfs_quota_operations;
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
/* new format is limited by the 32 bit wide i_blocks field, want to
|
|
|
|
** be one full block below that.
|
|
|
|
*/
|
|
|
|
s->s_maxbytes = (512LL << 32) - s->s_blocksize;
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* after journal replay, reread all bitmap and super blocks */
|
2005-07-13 11:21:28 +08:00
|
|
|
static int reread_meta_blocks(struct super_block *s)
|
|
|
|
{
|
|
|
|
ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
|
2009-04-07 10:19:49 +08:00
|
|
|
reiserfs_write_unlock(s);
|
2005-07-13 11:21:28 +08:00
|
|
|
wait_on_buffer(SB_BUFFER_WITH_SB(s));
|
2009-04-07 10:19:49 +08:00
|
|
|
reiserfs_write_lock(s);
|
2005-07-13 11:21:28 +08:00
|
|
|
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
|
2005-07-13 11:21:28 +08:00
|
|
|
return 1;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/////////////////////////////////////////////////////
|
|
|
|
// hash detection stuff
|
|
|
|
|
|
|
|
// if root directory is empty - we set default - Yura's - hash and
|
|
|
|
// warn about it
|
|
|
|
// FIXME: we look for only one name in a directory. If tea and yura
|
|
|
|
// both have the same value - we ask user to send report to the
|
|
|
|
// mailing list
|
2005-07-13 11:21:28 +08:00
|
|
|
static __u32 find_hash_out(struct super_block *s)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
int retval;
|
|
|
|
struct inode *inode;
|
|
|
|
struct cpu_key key;
|
|
|
|
INITIALIZE_PATH(path);
|
|
|
|
struct reiserfs_dir_entry de;
|
|
|
|
__u32 hash = DEFAULT_HASH;
|
|
|
|
|
|
|
|
inode = s->s_root->d_inode;
|
|
|
|
|
|
|
|
do { // Some serious "goto"-hater was there ;)
|
|
|
|
u32 teahash, r5hash, yurahash;
|
|
|
|
|
|
|
|
make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3);
|
|
|
|
retval = search_by_entry_key(s, &key, &path, &de);
|
|
|
|
if (retval == IO_ERROR) {
|
|
|
|
pathrelse(&path);
|
|
|
|
return UNSET_HASH;
|
|
|
|
}
|
|
|
|
if (retval == NAME_NOT_FOUND)
|
|
|
|
de.de_entry_num--;
|
|
|
|
set_de_name_and_namelen(&de);
|
|
|
|
if (deh_offset(&(de.de_deh[de.de_entry_num])) == DOT_DOT_OFFSET) {
|
|
|
|
/* allow override in this case */
|
|
|
|
if (reiserfs_rupasov_hash(s)) {
|
|
|
|
hash = YURA_HASH;
|
|
|
|
}
|
2009-03-31 02:02:20 +08:00
|
|
|
reiserfs_info(s, "FS seems to be empty, autodetect "
|
|
|
|
"is using the default hash\n");
|
2005-07-13 11:21:28 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
r5hash = GET_HASH_VALUE(r5_hash(de.de_name, de.de_namelen));
|
|
|
|
teahash = GET_HASH_VALUE(keyed_hash(de.de_name, de.de_namelen));
|
|
|
|
yurahash = GET_HASH_VALUE(yura_hash(de.de_name, de.de_namelen));
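		/* If two of the candidate hashes collide for this name and the
		 * colliding value also matches the on-disk hash offset, the hash
		 * function in use cannot be told apart, so give up below. */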
|
|
|
|
		if (((teahash == r5hash) &&
		     (GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num])))
		      == r5hash)) ||
		    ((teahash == yurahash) &&
		     (yurahash ==
		      GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num]))))) ||
		    ((r5hash == yurahash) &&
		     (yurahash ==
		      GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num])))))) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2506", "Unable to "
|
|
|
|
"automatically detect hash function. "
|
|
|
|
"Please mount with -o "
|
|
|
|
"hash={tea,rupasov,r5}");
|
2005-07-13 11:21:28 +08:00
|
|
|
hash = UNSET_HASH;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num]))) ==
|
|
|
|
yurahash)
|
|
|
|
hash = YURA_HASH;
|
|
|
|
else if (GET_HASH_VALUE
|
|
|
|
(deh_offset(&(de.de_deh[de.de_entry_num]))) == teahash)
|
|
|
|
hash = TEA_HASH;
|
|
|
|
else if (GET_HASH_VALUE
|
|
|
|
(deh_offset(&(de.de_deh[de.de_entry_num]))) == r5hash)
|
|
|
|
hash = R5_HASH;
|
|
|
|
else {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2506",
|
|
|
|
"Unrecognised hash function");
|
2005-07-13 11:21:28 +08:00
|
|
|
hash = UNSET_HASH;
|
|
|
|
}
|
|
|
|
} while (0);
|
|
|
|
|
|
|
|
pathrelse(&path);
|
|
|
|
return hash;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// finds out which hash names are sorted with
|
2005-07-13 11:21:28 +08:00
|
|
|
static int what_hash(struct super_block *s)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
__u32 code;
|
|
|
|
|
|
|
|
code = sb_hash_function_code(SB_DISK_SUPER_BLOCK(s));
|
|
|
|
|
|
|
|
/* reiserfs_hash_detect() == true if any of the hash mount options
|
|
|
|
** were used. We must check them to make sure the user isn't
|
|
|
|
** using a bad hash value
|
|
|
|
*/
|
|
|
|
if (code == UNSET_HASH || reiserfs_hash_detect(s))
|
|
|
|
code = find_hash_out(s);
|
|
|
|
|
|
|
|
if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
|
2009-03-31 02:02:44 +08:00
|
|
|
/* detection has found the hash, and we must check against the
|
|
|
|
** mount options
|
2005-07-13 11:21:28 +08:00
|
|
|
*/
|
|
|
|
if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2507",
|
|
|
|
"Error, %s hash detected, "
|
2005-07-13 11:21:28 +08:00
|
|
|
"unable to force rupasov hash",
|
|
|
|
reiserfs_hashname(code));
|
|
|
|
code = UNSET_HASH;
|
|
|
|
} else if (reiserfs_tea_hash(s) && code != TEA_HASH) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2508",
|
|
|
|
"Error, %s hash detected, "
|
2005-07-13 11:21:28 +08:00
|
|
|
"unable to force tea hash",
|
|
|
|
reiserfs_hashname(code));
|
|
|
|
code = UNSET_HASH;
|
|
|
|
} else if (reiserfs_r5_hash(s) && code != R5_HASH) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "reiserfs-2509",
|
|
|
|
"Error, %s hash detected, "
|
2005-07-13 11:21:28 +08:00
|
|
|
"unable to force r5 hash",
|
|
|
|
reiserfs_hashname(code));
|
|
|
|
code = UNSET_HASH;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* find_hash_out was not called or could not determine the hash */
|
|
|
|
if (reiserfs_rupasov_hash(s)) {
|
|
|
|
code = YURA_HASH;
|
|
|
|
} else if (reiserfs_tea_hash(s)) {
|
|
|
|
code = TEA_HASH;
|
|
|
|
} else if (reiserfs_r5_hash(s)) {
|
|
|
|
code = R5_HASH;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-03-31 02:02:44 +08:00
|
|
|
/* if we are mounted RW, and we have a new valid hash code, update
|
2005-07-13 11:21:28 +08:00
|
|
|
** the super
|
|
|
|
*/
|
|
|
|
if (code != UNSET_HASH &&
|
|
|
|
!(s->s_flags & MS_RDONLY) &&
|
|
|
|
code != sb_hash_function_code(SB_DISK_SUPER_BLOCK(s))) {
|
|
|
|
set_sb_hash_function_code(SB_DISK_SUPER_BLOCK(s), code);
|
|
|
|
}
|
|
|
|
return code;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// return pointer to appropriate function
|
2005-07-13 11:21:28 +08:00
|
|
|
static hashf_t hash_function(struct super_block *s)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
switch (what_hash(s)) {
|
|
|
|
case TEA_HASH:
|
|
|
|
reiserfs_info(s, "Using tea hash to sort names\n");
|
|
|
|
return keyed_hash;
|
|
|
|
case YURA_HASH:
|
|
|
|
reiserfs_info(s, "Using rupasov hash to sort names\n");
|
|
|
|
return yura_hash;
|
|
|
|
case R5_HASH:
|
|
|
|
reiserfs_info(s, "Using r5 hash to sort names\n");
|
|
|
|
return r5_hash;
|
|
|
|
}
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// this is used to set up correct value for old partitions
|
2005-07-13 11:21:28 +08:00
|
|
|
static int function2code(hashf_t func)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
if (func == keyed_hash)
|
|
|
|
return TEA_HASH;
|
|
|
|
if (func == yura_hash)
|
|
|
|
return YURA_HASH;
|
|
|
|
if (func == r5_hash)
|
|
|
|
return R5_HASH;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
BUG(); // should never happen
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2009-03-31 02:02:21 +08:00
|
|
|
#define SWARN(silent, s, id, ...) \
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!(silent)) \
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, id, __VA_ARGS__)
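/* SWARN() only emits the warning when the filesystem is not being mounted
 * with the silent flag, i.e. during a normal (non-probing) mount. */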
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct inode *root_inode;
|
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
int old_format = 0;
|
|
|
|
unsigned long blocks;
|
|
|
|
unsigned int commit_max_age = 0;
|
|
|
|
int jinit_done = 0;
|
|
|
|
struct reiserfs_iget_args args;
|
|
|
|
struct reiserfs_super_block *rs;
|
|
|
|
char *jdev_name;
|
|
|
|
struct reiserfs_sb_info *sbi;
|
|
|
|
int errval = -EINVAL;
|
2008-07-25 16:46:38 +08:00
|
|
|
char *qf_names[MAXQUOTAS] = {};
|
|
|
|
unsigned int qfmt = 0;
|
2005-07-13 11:21:28 +08:00
|
|
|
|
2008-02-08 20:21:47 +08:00
|
|
|
save_mount_options(s, data);
|
|
|
|
|
2006-12-07 12:39:01 +08:00
|
|
|
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
|
2010-03-30 03:12:39 +08:00
|
|
|
if (!sbi)
|
|
|
|
return -ENOMEM;
|
2005-07-13 11:21:28 +08:00
|
|
|
s->s_fs_info = sbi;
|
|
|
|
/* Set default values for options: non-aggressive tails, RO on errors */
|
|
|
|
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
|
|
|
|
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
|
2011-07-17 04:47:00 +08:00
|
|
|
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
|
2005-07-13 11:21:28 +08:00
|
|
|
/* no preallocation minimum, be smart in
|
|
|
|
reiserfs_file_write instead */
|
|
|
|
REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
|
|
|
|
/* Preallocate by 16 blocks (17-1) at once */
|
|
|
|
REISERFS_SB(s)->s_alloc_options.preallocsize = 17;
|
|
|
|
/* setup default block allocator options */
|
|
|
|
reiserfs_init_alloc_options(s);
|
|
|
|
|
reiserfs: kill-the-BKL
This patch is an attempt to remove the BKL-based locking scheme from reiserfs.
It is a bit inspired from an old attempt by Peter Zijlstra:
http://lkml.indiana.edu/hypermail/linux/kernel/0704.2/2174.html
The BKL is heavily used in this filesystem to prevent concurrent
write accesses on the filesystem.
Reiserfs makes deep use of the specific properties of the BKL:
- It can be acquired recursively by the same task
- It is released on schedule() calls and reacquired when schedule() returns
The two properties above are a roadmap for the reiserfs write locking, so it's
very hard to simply replace it with a common mutex:
- We need a recursive lock unless we want to restructure several blocks
of the code.
- We need to identify the sites where the BKL was implicitly relaxed
(schedule, wait, sync, etc...) so that we can in turn release and
reacquire our new lock explicitly.
Such implicit releases of the lock are often required to let other
resource producers/consumers do their job, or we can suffer unexpected
starvations or deadlocks.
So the new lock that replaces the BKL here is a per-superblock mutex with a
specific property: it can be acquired recursively by the same task, like the
BKL.
For this purpose, we integrate a lock owner and a lock depth field in the
superblock information structure.
The first axis of this patch is to turn the reiserfs_write_(un)lock() functions
into wrappers that manage this mutex. Also some explicit calls to
lock_kernel() have been converted to reiserfs_write_lock() helpers.
The second axis is to find the important blocking sites (schedule...(),
wait_on_buffer(), sync_dirty_buffer(), etc...) and then apply an explicit
release of the write lock at these locations before blocking. Then we can
safely wait for those who can give us resources or those who need some.
Typically this is a fight between the current writer, the reiserfs workqueue
(aka the async committer) and the pdflush threads.
The third axis is a consequence of the second. The write lock is usually
at the top of a lock dependency chain which can include the journal lock, the
flush lock or the commit lock. So it's dangerous to release and then try to
reacquire the write lock while we still hold other locks.
This is fine with the bkl:
  T1:  lock_kernel()
  T1:  mutex_lock(A)
  T1:  unlock_kernel()
  T1:  // do something
  T2:  lock_kernel()
  T2:  mutex_lock(A) -> already locked by T1
  T2:  schedule() (and then unlock_kernel())
  T1:  lock_kernel()
  T1:  mutex_unlock(A)
  ....
This is not fine with a mutex:
  T1:  mutex_lock(write)
  T1:  mutex_lock(A)
  T1:  mutex_unlock(write)
  T1:  // do something
  T2:  mutex_lock(write)
  T2:  mutex_lock(A) -> already locked by T1
  T2:  schedule()
  T1:  mutex_lock(write) -> already locked by T2
  deadlock
The solution in this patch is to provide a helper which releases the write
lock and sleeps a bit if we can't lock a mutex that depends on it. It's another
simulation of the BKL behaviour.
The last axis is to locate the fs callbacks that are called with the BKL held,
according to Documentation/filesystems/Locking.
Those are:
- reiserfs_remount
- reiserfs_fill_super
- reiserfs_put_super
Reiserfs didn't need to lock explicitly there because of the context of these
callbacks, but now we must take care of that with the new locking.
After this patch, reiserfs suffers from a slight performance regression (for now).
On UP, a high volume write with dd reports an average of 27 MB/s instead
of 30 MB/s without the patch applied.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Cc: Jeff Mahoney <jeffm@suse.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Bron Gondwana <brong@fastmail.fm>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
LKML-Reference: <1239070789-13354-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
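The per-superblock recursive write lock described above is what the mutex_init() and lock_depth initialization just below set up. As an illustration only, here is a minimal sketch of the scheme the message describes, not necessarily the exact code that landed in fs/reiserfs/lock.c; reiserfs_mutex_lock_safe is used as the name of the dependent-mutex helper the message hints at:

/*
 * Sketch of a BKL-like recursive write lock, assuming struct
 * reiserfs_sb_info carries a "lock" mutex, a "lock_owner" task
 * pointer and an integer "lock_depth" initialized to -1.
 */
void reiserfs_write_lock(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	if (sb_i->lock_owner != current) {
		/* first acquisition by this task: take the mutex */
		mutex_lock(&sb_i->lock);
		sb_i->lock_owner = current;
	}
	/* only the owning task touches lock_depth, no locking needed */
	sb_i->lock_depth++;
}

void reiserfs_write_unlock(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	BUG_ON(sb_i->lock_owner != current);

	if (--sb_i->lock_depth == -1) {
		/* outermost unlock: really drop the mutex */
		sb_i->lock_owner = NULL;
		mutex_unlock(&sb_i->lock);
	}
}

/*
 * Helper hinted at in the message: before taking a mutex that ranks
 * below the write lock, drop the write lock to avoid the T1/T2
 * deadlock shown above, then reacquire it afterwards.
 */
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
					    struct super_block *s)
{
	reiserfs_write_unlock(s);
	mutex_lock(m);
	reiserfs_write_lock(s);
}

With wrappers like these, reiserfs_fill_super() can take the lock early (as it does further down) while the helpers it calls are still free to take it again.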
2009-04-07 10:19:49 +08:00
|
|
|
mutex_init(&REISERFS_SB(s)->lock);
|
|
|
|
REISERFS_SB(s)->lock_depth = -1;
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
jdev_name = NULL;
|
|
|
|
if (reiserfs_parse_options
|
|
|
|
(s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
|
2008-07-25 16:46:38 +08:00
|
|
|
&commit_max_age, qf_names, &qfmt) == 0) {
|
2012-01-11 07:11:07 +08:00
|
|
|
goto error_unlocked;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2011-12-22 03:17:10 +08:00
|
|
|
if (jdev_name && jdev_name[0]) {
|
|
|
|
REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
|
|
|
|
if (!REISERFS_SB(s)->s_jdev) {
|
|
|
|
SWARN(silent, s, "", "Cannot allocate memory for "
|
|
|
|
"journal device name");
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
2008-07-25 16:46:38 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
handle_quota_files(s, qf_names, &qfmt);
|
|
|
|
#endif
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
if (blocks) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "jmacd-7", "resize option for remount only");
|
2012-01-11 07:11:07 +08:00
|
|
|
goto error_unlocked;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* try old format (undistributed bitmap, super block in the 8th 1k block of the device) */
|
|
|
|
if (!read_super_block(s, REISERFS_OLD_DISK_OFFSET_IN_BYTES))
|
|
|
|
old_format = 1;
|
|
|
|
/* try new format (64th 1k block), which can contain the reiserfs super block */
|
|
|
|
else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "sh-2021", "can not find reiserfs on %s",
|
2005-07-13 11:21:28 +08:00
|
|
|
reiserfs_bdevname(s));
|
2012-01-11 07:11:07 +08:00
|
|
|
goto error_unlocked;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
rs = SB_DISK_SUPER_BLOCK(s);
|
|
|
|
/* Let's do a basic sanity check to verify that the underlying device is not
|
|
|
|
smaller than the filesystem. If the check fails then abort and scream,
|
|
|
|
because bad stuff will happen otherwise. */
|
|
|
|
if (s->s_bdev && s->s_bdev->bd_inode
|
|
|
|
&& i_size_read(s->s_bdev->bd_inode) <
|
|
|
|
sb_block_count(rs) * sb_blocksize(rs)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "", "Filesystem cannot be "
|
|
|
|
"mounted because it is bigger than the device");
|
|
|
|
SWARN(silent, s, "", "You may need to run fsck "
|
|
|
|
"or increase size of your LVM partition");
|
|
|
|
SWARN(silent, s, "", "Or may be you forgot to "
|
|
|
|
"reboot after fdisk when it told you to");
|
2012-01-11 07:11:07 +08:00
|
|
|
goto error_unlocked;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
sbi->s_mount_state = SB_REISERFS_STATE(s);
|
|
|
|
sbi->s_mount_state = REISERFS_VALID_FS;
|
|
|
|
|
2006-10-01 14:28:43 +08:00
|
|
|
if ((errval = reiserfs_init_bitmap_cache(s))) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "jmacd-8", "unable to read bitmap");
|
2012-01-11 07:11:07 +08:00
|
|
|
goto error_unlocked;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2012-01-11 07:11:07 +08:00
|
|
|
|
2006-11-03 14:07:20 +08:00
|
|
|
errval = -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_REISERFS_CHECK
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "", "CONFIG_REISERFS_CHECK is set ON");
|
|
|
|
SWARN(silent, s, "", "- it is slow mode for debugging.");
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
/* make data=ordered the default */
|
|
|
|
if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) &&
|
|
|
|
!reiserfs_data_writeback(s)) {
|
|
|
|
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (reiserfs_data_log(s)) {
|
|
|
|
reiserfs_info(s, "using journaled data mode\n");
|
|
|
|
} else if (reiserfs_data_ordered(s)) {
|
|
|
|
reiserfs_info(s, "using ordered data mode\n");
|
|
|
|
} else {
|
|
|
|
reiserfs_info(s, "using writeback data mode\n");
|
|
|
|
}
|
|
|
|
if (reiserfs_barrier_flush(s)) {
|
|
|
|
printk("reiserfs: using flush barriers\n");
|
|
|
|
}
|
2012-01-11 07:11:07 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This path was assumed to be called with the BKL held in the old times.
|
|
|
|
* Now we have inherited the big reiserfs lock from it and many
|
|
|
|
* reiserfs helpers called in the mount path and elsewhere require
|
|
|
|
* this lock to be held even if it's not always necessary. Let's be
|
|
|
|
* conservative and hold it early. The window can be reduced after
|
|
|
|
* careful review of the code.
|
|
|
|
*/
|
|
|
|
reiserfs_write_lock(s);
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
// set_device_ro(s->s_dev, 1) ;
|
|
|
|
if (journal_init(s, jdev_name, old_format, commit_max_age)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "sh-2022",
|
|
|
|
"unable to initialize journal space");
|
2005-07-13 11:21:28 +08:00
|
|
|
goto error;
|
|
|
|
} else {
|
|
|
|
jinit_done = 1; /* once this is set, journal_release must be called
|
|
|
|
** if we error out of the mount
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
if (reread_meta_blocks(s)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "jmacd-9",
|
|
|
|
"unable to reread meta blocks after journal init");
|
2005-07-13 11:21:28 +08:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (replay_only(s))
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "clm-7000",
|
|
|
|
"Detected readonly device, marking FS readonly");
|
2005-07-13 11:21:28 +08:00
|
|
|
s->s_flags |= MS_RDONLY;
|
|
|
|
}
|
|
|
|
args.objectid = REISERFS_ROOT_OBJECTID;
|
|
|
|
args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
|
|
|
|
root_inode =
|
|
|
|
iget5_locked(s, REISERFS_ROOT_OBJECTID, reiserfs_find_actor,
|
|
|
|
reiserfs_init_locked_inode, (void *)(&args));
|
|
|
|
if (!root_inode) {
|
2009-03-31 02:02:21 +08:00
|
|
|
SWARN(silent, s, "jmacd-10", "get root inode failed");
|
2005-07-13 11:21:28 +08:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (root_inode->i_state & I_NEW) {
|
|
|
|
reiserfs_read_locked_inode(root_inode, &args);
|
|
|
|
unlock_new_inode(root_inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
s->s_root = d_alloc_root(root_inode);
|
|
|
|
if (!s->s_root) {
|
|
|
|
iput(root_inode);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
// define and initialize hash function
|
|
|
|
sbi->s_hash_function = hash_function(s);
|
|
|
|
if (sbi->s_hash_function == NULL) {
|
|
|
|
dput(s->s_root);
|
|
|
|
s->s_root = NULL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_reiserfs_3_5(rs)
|
|
|
|
|| (is_reiserfs_jr(rs) && SB_VERSION(s) == REISERFS_VERSION_1))
|
|
|
|
set_bit(REISERFS_3_5, &(sbi->s_properties));
|
2006-10-01 14:28:40 +08:00
|
|
|
else if (old_format)
|
|
|
|
set_bit(REISERFS_OLD_FORMAT, &(sbi->s_properties));
|
2005-07-13 11:21:28 +08:00
|
|
|
else
|
|
|
|
set_bit(REISERFS_3_6, &(sbi->s_properties));
|
|
|
|
|
|
|
|
if (!(s->s_flags & MS_RDONLY)) {
|
|
|
|
|
|
|
|
errval = journal_begin(&th, s, 1);
|
|
|
|
if (errval) {
|
|
|
|
dput(s->s_root);
|
|
|
|
s->s_root = NULL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
|
|
|
|
|
|
|
|
set_sb_umount_state(rs, REISERFS_ERROR_FS);
|
|
|
|
set_sb_fs_state(rs, 0);
|
|
|
|
|
2007-10-19 14:39:27 +08:00
|
|
|
/* Clear out s_bmap_nr if it would wrap. We can handle this
|
|
|
|
* case, but older revisions can't. This will cause the
|
|
|
|
* file system to fail mount on those older implementations,
|
|
|
|
* avoiding corruption. -jeffm */
|
|
|
|
if (bmap_would_wrap(reiserfs_bmap_count(s)) &&
|
|
|
|
sb_bmap_nr(rs) != 0) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(s, "super-2030", "This file system "
|
2007-10-19 14:39:27 +08:00
|
|
|
"claims to use %u bitmap blocks in "
|
|
|
|
"its super block, but requires %u. "
|
|
|
|
"Clearing to zero.", sb_bmap_nr(rs),
|
|
|
|
reiserfs_bmap_count(s));
|
|
|
|
|
|
|
|
set_sb_bmap_nr(rs, 0);
|
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if (old_format_only(s)) {
|
|
|
|
/* filesystem of format 3.5 either with standard or non-standard
|
|
|
|
journal */
|
|
|
|
if (convert_reiserfs(s)) {
|
|
|
|
/* and -o conv is given */
|
|
|
|
if (!silent)
|
|
|
|
reiserfs_info(s,
|
|
|
|
"converting 3.5 filesystem to the 3.6 format");
|
|
|
|
|
|
|
|
if (is_reiserfs_3_5(rs))
|
|
|
|
/* put magic string of 3.6 format. 2.2 will not be able to
|
|
|
|
mount this filesystem anymore */
|
|
|
|
memcpy(rs->s_v1.s_magic,
|
|
|
|
reiserfs_3_6_magic_string,
|
|
|
|
sizeof
|
|
|
|
(reiserfs_3_6_magic_string));
|
|
|
|
|
|
|
|
set_sb_version(rs, REISERFS_VERSION_2);
|
|
|
|
reiserfs_convert_objectid_map_v1(s);
|
|
|
|
set_bit(REISERFS_3_6, &(sbi->s_properties));
|
|
|
|
clear_bit(REISERFS_3_5, &(sbi->s_properties));
|
|
|
|
} else if (!silent) {
|
|
|
|
reiserfs_info(s, "using 3.5.x disk format\n");
|
|
|
|
}
|
2009-03-31 02:02:16 +08:00
|
|
|
} else
|
|
|
|
set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
|
|
|
|
errval = journal_end(&th, s, 1);
|
|
|
|
if (errval) {
|
|
|
|
dput(s->s_root);
|
|
|
|
s->s_root = NULL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2009-05-03 18:00:05 +08:00
|
|
|
if ((errval = reiserfs_lookup_privroot(s)) ||
|
|
|
|
(errval = reiserfs_xattr_init(s, s->s_flags))) {
|
2005-07-13 11:21:28 +08:00
|
|
|
dput(s->s_root);
|
|
|
|
s->s_root = NULL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* look for files which were to be removed in previous session */
|
|
|
|
finish_unfinished(s);
|
|
|
|
} else {
|
|
|
|
if (old_format_only(s) && !silent) {
|
|
|
|
reiserfs_info(s, "using 3.5.x disk format\n");
|
|
|
|
}
|
|
|
|
|
2009-05-03 18:00:05 +08:00
|
|
|
if ((errval = reiserfs_lookup_privroot(s)) ||
|
|
|
|
(errval = reiserfs_xattr_init(s, s->s_flags))) {
|
2005-07-13 11:21:28 +08:00
|
|
|
dput(s->s_root);
|
|
|
|
s->s_root = NULL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// mark hash in super block: it could be unset. overwrite should be ok
|
|
|
|
set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
|
|
|
|
|
|
|
|
handle_attrs(s);
|
|
|
|
|
|
|
|
reiserfs_proc_info_init(s);
|
|
|
|
|
|
|
|
init_waitqueue_head(&(sbi->s_wait));
|
|
|
|
spin_lock_init(&sbi->bitmap_lock);
|
|
|
|
|
reiserfs: kill-the-BKL (same commit as annotated above; full message quoted earlier)
2009-04-07 10:19:49 +08:00
|
|
|
reiserfs_write_unlock(s);
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
return (0);
|
|
|
|
|
2008-07-25 16:46:38 +08:00
|
|
|
error:
|
2012-01-11 07:11:07 +08:00
|
|
|
reiserfs_write_unlock(s);
|
|
|
|
|
|
|
|
error_unlocked:
|
|
|
|
/* kill the commit thread, free journal ram */
|
|
|
|
if (jinit_done) {
|
|
|
|
reiserfs_write_lock(s);
|
2005-07-13 11:21:28 +08:00
|
|
|
journal_release_error(NULL, s);
|
2012-01-11 07:11:07 +08:00
|
|
|
reiserfs_write_unlock(s);
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2006-10-01 14:28:44 +08:00
|
|
|
|
|
|
|
reiserfs_free_bitmap_cache(s);
|
2005-07-13 11:21:28 +08:00
|
|
|
if (SB_BUFFER_WITH_SB(s))
|
|
|
|
brelse(SB_BUFFER_WITH_SB(s));
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_QUOTA
|
2006-10-01 14:28:44 +08:00
|
|
|
{
|
|
|
|
int j;
|
2008-07-25 16:46:38 +08:00
|
|
|
for (j = 0; j < MAXQUOTAS; j++)
|
|
|
|
kfree(qf_names[j]);
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
2005-10-31 07:00:16 +08:00
|
|
|
kfree(sbi);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
s->s_fs_info = NULL;
|
|
|
|
return errval;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-06-23 17:02:58 +08:00
|
|
|
static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-06-23 17:02:58 +08:00
|
|
|
struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(dentry->d_sb);
|
2005-07-13 11:21:28 +08:00
|
|
|
|
|
|
|
buf->f_namelen = (REISERFS_MAX_NAME(dentry->d_sb->s_blocksize));
|
|
|
|
buf->f_bfree = sb_free_blocks(rs);
|
|
|
|
buf->f_bavail = buf->f_bfree;
|
|
|
|
buf->f_blocks = sb_block_count(rs) - sb_bmap_nr(rs) - 1;
|
2006-06-23 17:02:58 +08:00
|
|
|
buf->f_bsize = dentry->d_sb->s_blocksize;
|
2005-07-13 11:21:28 +08:00
|
|
|
/* changed to accommodate gcc folks. */
|
|
|
|
buf->f_type = REISERFS_SUPER_MAGIC;
|
2009-04-03 07:59:41 +08:00
|
|
|
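/* build f_fsid from a crc32 of each half of the on-disk uuid */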
buf->f_fsid.val[0] = (u32)crc32_le(0, rs->s_uuid, sizeof(rs->s_uuid)/2);
|
|
|
|
buf->f_fsid.val[1] = (u32)crc32_le(0, rs->s_uuid + sizeof(rs->s_uuid)/2,
|
|
|
|
sizeof(rs->s_uuid)/2);
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
static int reiserfs_write_dquot(struct dquot *dquot)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
int ret, err;
|
|
|
|
|
|
|
|
reiserfs_write_lock(dquot->dq_sb);
|
|
|
|
ret =
|
|
|
|
journal_begin(&th, dquot->dq_sb,
|
|
|
|
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
ret = dquot_commit(dquot);
|
|
|
|
err =
|
|
|
|
journal_end(&th, dquot->dq_sb,
|
|
|
|
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
|
|
|
|
if (!ret && err)
|
|
|
|
ret = err;
|
|
|
|
out:
|
|
|
|
reiserfs_write_unlock(dquot->dq_sb);
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int reiserfs_acquire_dquot(struct dquot *dquot)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
int ret, err;
|
|
|
|
|
|
|
|
reiserfs_write_lock(dquot->dq_sb);
|
|
|
|
ret =
|
|
|
|
journal_begin(&th, dquot->dq_sb,
|
|
|
|
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
ret = dquot_acquire(dquot);
|
|
|
|
err =
|
|
|
|
journal_end(&th, dquot->dq_sb,
|
|
|
|
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
|
|
|
|
if (!ret && err)
|
|
|
|
ret = err;
|
|
|
|
out:
|
|
|
|
reiserfs_write_unlock(dquot->dq_sb);
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int reiserfs_release_dquot(struct dquot *dquot)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
int ret, err;
|
|
|
|
|
|
|
|
reiserfs_write_lock(dquot->dq_sb);
|
|
|
|
ret =
|
|
|
|
journal_begin(&th, dquot->dq_sb,
|
|
|
|
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
|
2007-09-12 06:23:29 +08:00
|
|
|
if (ret) {
|
|
|
|
/* Release dquot anyway to avoid endless cycle in dqput() */
|
|
|
|
dquot_release(dquot);
|
2005-07-13 11:21:28 +08:00
|
|
|
goto out;
|
2007-09-12 06:23:29 +08:00
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
ret = dquot_release(dquot);
|
|
|
|
err =
|
|
|
|
journal_end(&th, dquot->dq_sb,
|
|
|
|
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
|
|
|
|
if (!ret && err)
|
|
|
|
ret = err;
|
|
|
|
out:
|
|
|
|
reiserfs_write_unlock(dquot->dq_sb);
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int reiserfs_mark_dquot_dirty(struct dquot *dquot)
|
|
|
|
{
|
2008-07-25 16:46:37 +08:00
|
|
|
/* Are we journaling quotas? */
|
2005-07-13 11:21:28 +08:00
|
|
|
if (REISERFS_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
|
|
|
|
REISERFS_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
|
|
|
|
dquot_mark_dquot_dirty(dquot);
|
|
|
|
return reiserfs_write_dquot(dquot);
|
|
|
|
} else
|
|
|
|
return dquot_mark_dquot_dirty(dquot);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int reiserfs_write_info(struct super_block *sb, int type)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
int ret, err;
|
|
|
|
|
|
|
|
/* Data block + inode block */
|
|
|
|
reiserfs_write_lock(sb);
|
|
|
|
ret = journal_begin(&th, sb, 2);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
ret = dquot_commit_info(sb, type);
|
|
|
|
err = journal_end(&th, sb, 2);
|
|
|
|
if (!ret && err)
|
|
|
|
ret = err;
|
|
|
|
out:
|
|
|
|
reiserfs_write_unlock(sb);
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-06-23 15:09:16 +08:00
|
|
|
* Turn on quotas during mount time - we need to find the quota file and such...
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
static int reiserfs_quota_on_mount(struct super_block *sb, int type)
|
|
|
|
{
|
2010-05-19 19:16:45 +08:00
|
|
|
return dquot_quota_on_mount(sb, REISERFS_SB(sb)->s_qf_names[type],
|
|
|
|
REISERFS_SB(sb)->s_jquota_fmt, type);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Standard function to be called on quota_on
|
|
|
|
*/
|
2005-07-13 11:21:28 +08:00
|
|
|
static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
|
2010-09-15 23:38:58 +08:00
|
|
|
struct path *path)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
int err;
|
2008-04-28 17:16:23 +08:00
|
|
|
struct inode *inode;
|
2008-07-25 16:46:36 +08:00
|
|
|
struct reiserfs_transaction_handle th;
|
2011-12-22 03:17:10 +08:00
|
|
|
int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
|
2005-07-13 11:21:28 +08:00
|
|
|
|
2011-12-22 03:17:10 +08:00
|
|
|
if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
|
2005-07-13 11:21:28 +08:00
|
|
|
return -EINVAL;
|
2010-05-19 19:16:43 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
/* Quotafile not on the same filesystem? */
|
2011-12-08 07:16:57 +08:00
|
|
|
if (path->dentry->d_sb != sb) {
|
2008-08-01 16:29:18 +08:00
|
|
|
err = -EXDEV;
|
|
|
|
goto out;
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2010-09-15 23:38:58 +08:00
|
|
|
inode = path->dentry->d_inode;
|
2005-07-13 11:21:28 +08:00
|
|
|
/* We must not pack tails for quota files on reiserfs for quota IO to work */
|
2008-04-28 17:16:23 +08:00
|
|
|
if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
|
|
|
|
err = reiserfs_unpack(inode, NULL);
|
|
|
|
if (err) {
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(sb, "super-6520",
|
|
|
|
"Unpacking tail of quota file failed"
|
2008-04-28 17:16:23 +08:00
|
|
|
" (%d). Cannot turn on quotas.", err);
|
2008-08-01 16:29:18 +08:00
|
|
|
err = -EINVAL;
|
|
|
|
goto out;
|
2008-04-28 17:16:23 +08:00
|
|
|
}
|
|
|
|
mark_inode_dirty(inode);
|
2005-07-13 11:21:28 +08:00
|
|
|
}
|
2008-07-25 16:46:36 +08:00
|
|
|
/* Journaling quota? */
|
|
|
|
if (REISERFS_SB(sb)->s_qf_names[type]) {
|
|
|
|
/* Quotafile not of fs root? */
|
2010-09-15 23:38:58 +08:00
|
|
|
if (path->dentry->d_parent != sb->s_root)
|
2009-03-31 02:02:21 +08:00
|
|
|
reiserfs_warning(sb, "super-6521",
|
|
|
|
"Quota file not on filesystem root. "
|
2005-07-13 11:21:28 +08:00
|
|
|
"Journalled quota will not work.");
|
2008-07-25 16:46:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When we journal data on quota file, we have to flush journal to see
|
|
|
|
* all updates to the file when we bypass pagecache...
|
|
|
|
*/
|
|
|
|
if (reiserfs_file_data_log(inode)) {
|
|
|
|
/* Just start temporary transaction and finish it */
|
|
|
|
err = journal_begin(&th, sb, 1);
|
|
|
|
if (err)
|
2008-08-01 16:29:18 +08:00
|
|
|
goto out;
|
2008-07-25 16:46:36 +08:00
|
|
|
err = journal_end_sync(&th, sb, 1);
|
|
|
|
if (err)
|
2008-08-01 16:29:18 +08:00
|
|
|
goto out;
|
2008-07-25 16:46:36 +08:00
|
|
|
}
|
2010-09-15 23:38:58 +08:00
|
|
|
err = dquot_quota_on(sb, type, format_id, path);
|
2008-08-01 16:29:18 +08:00
|
|
|
out:
|
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Read data from quotafile - avoid pagecache and such because we cannot afford
|
|
|
|
* acquiring the locks... As quota files are never truncated and quota code
|
2011-03-31 09:57:33 +08:00
|
|
|
* itself serializes the operations (and no one else should touch the files)
|
2005-04-17 06:20:36 +08:00
|
|
|
* we don't have to be afraid of races */
|
|
|
|
static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
|
|
|
|
size_t len, loff_t off)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct inode *inode = sb_dqopt(sb)->files[type];
|
|
|
|
unsigned long blk = off >> sb->s_blocksize_bits;
|
|
|
|
int err = 0, offset = off & (sb->s_blocksize - 1), tocopy;
|
|
|
|
size_t toread;
|
|
|
|
struct buffer_head tmp_bh, *bh;
|
|
|
|
loff_t i_size = i_size_read(inode);
|
|
|
|
|
|
|
|
if (off > i_size)
|
|
|
|
return 0;
|
|
|
|
if (off + len > i_size)
|
|
|
|
len = i_size - off;
|
|
|
|
toread = len;
|
|
|
|
while (toread > 0) {
|
|
|
|
tocopy =
|
|
|
|
sb->s_blocksize - offset <
|
|
|
|
toread ? sb->s_blocksize - offset : toread;
|
|
|
|
tmp_bh.b_state = 0;
|
|
|
|
/* Quota files are without tails so we can safely use this function */
|
|
|
|
reiserfs_write_lock(sb);
|
|
|
|
err = reiserfs_get_block(inode, blk, &tmp_bh, 0);
|
|
|
|
reiserfs_write_unlock(sb);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
if (!buffer_mapped(&tmp_bh)) /* A hole? */
|
|
|
|
memset(data, 0, tocopy);
|
|
|
|
else {
|
|
|
|
bh = sb_bread(sb, tmp_bh.b_blocknr);
|
|
|
|
if (!bh)
|
|
|
|
return -EIO;
|
|
|
|
memcpy(data, bh->b_data + offset, tocopy);
|
|
|
|
brelse(bh);
|
|
|
|
}
|
|
|
|
offset = 0;
|
|
|
|
toread -= tocopy;
|
|
|
|
data += tocopy;
|
|
|
|
blk++;
|
|
|
|
}
|
|
|
|
return len;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Write to quotafile (we know the transaction is already started and has
|
|
|
|
* enough credits) */
|
|
|
|
static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
|
|
|
|
const char *data, size_t len, loff_t off)
|
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
struct inode *inode = sb_dqopt(sb)->files[type];
|
|
|
|
unsigned long blk = off >> sb->s_blocksize_bits;
|
|
|
|
int err = 0, offset = off & (sb->s_blocksize - 1), tocopy;
|
|
|
|
int journal_quota = REISERFS_SB(sb)->s_qf_names[type] != NULL;
|
|
|
|
size_t towrite = len;
|
|
|
|
struct buffer_head tmp_bh, *bh;
|
|
|
|
|
2007-09-12 06:23:29 +08:00
|
|
|
if (!current->journal_info) {
|
|
|
|
printk(KERN_WARNING "reiserfs: Quota write (off=%Lu, len=%Lu)"
|
|
|
|
" cancelled because transaction is not started.\n",
|
|
|
|
(unsigned long long)off, (unsigned long long)len);
|
|
|
|
return -EIO;
|
|
|
|
}
|
2006-07-03 15:25:20 +08:00
|
|
|
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
|
2005-07-13 11:21:28 +08:00
|
|
|
while (towrite > 0) {
|
|
|
|
tocopy = sb->s_blocksize - offset < towrite ?
|
|
|
|
sb->s_blocksize - offset : towrite;
|
|
|
|
tmp_bh.b_state = 0;
|
|
|
|
err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
if (offset || tocopy != sb->s_blocksize)
|
|
|
|
bh = sb_bread(sb, tmp_bh.b_blocknr);
|
|
|
|
else
|
|
|
|
bh = sb_getblk(sb, tmp_bh.b_blocknr);
|
|
|
|
if (!bh) {
|
|
|
|
err = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
lock_buffer(bh);
|
|
|
|
memcpy(bh->b_data + offset, data, tocopy);
|
|
|
|
flush_dcache_page(bh->b_page);
|
|
|
|
set_buffer_uptodate(bh);
|
|
|
|
unlock_buffer(bh);
|
|
|
|
reiserfs_prepare_for_journal(sb, bh, 1);
|
|
|
|
journal_mark_dirty(current->journal_info, sb, bh);
|
|
|
|
if (!journal_quota)
|
|
|
|
reiserfs_add_ordered_list(inode, bh);
|
|
|
|
brelse(bh);
|
|
|
|
offset = 0;
|
|
|
|
towrite -= tocopy;
|
|
|
|
data += tocopy;
|
|
|
|
blk++;
|
|
|
|
}
|
2007-09-12 06:23:29 +08:00
|
|
|
out:
|
2008-07-05 00:59:34 +08:00
|
|
|
if (len == towrite) {
|
|
|
|
mutex_unlock(&inode->i_mutex);
|
2005-07-13 11:21:28 +08:00
|
|
|
return err;
|
2008-07-05 00:59:34 +08:00
|
|
|
}
|
2005-07-13 11:21:28 +08:00
|
|
|
if (inode->i_size < off + len - towrite)
|
|
|
|
i_size_write(inode, off + len - towrite);
|
|
|
|
inode->i_version++;
|
|
|
|
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
|
|
|
mark_inode_dirty(inode);
|
2006-01-10 07:59:24 +08:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
2005-07-13 11:21:28 +08:00
|
|
|
return len - towrite;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2010-07-25 04:46:55 +08:00
|
|
|
static struct dentry *get_super_block(struct file_system_type *fs_type,
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience functions is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing is currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 17:02:57 +08:00
|
|
|
int flags, const char *dev_name,
|
2010-07-25 04:46:55 +08:00
|
|
|
void *data)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-07-25 04:46:55 +08:00
|
|
|
return mount_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
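The long VFS annotation above get_super_block() describes the intermediate get_sb() interface that preceded the mount_bdev() form used here. As an illustration only, here is a minimal sketch of what a converted callback looked like under that scheme, using a hypothetical foo_get_sb()/foo_fill_super() pair (not reiserfs functions):

/*
 * Sketch of the older get_sb() style: the callback returns an int and
 * instantiates the vfsmount passed in by the VFS, typically through one
 * of the get_sb_*() convenience helpers.
 */
static int foo_get_sb(struct file_system_type *fs_type, int flags,
		      const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data,
			   foo_fill_super, mnt);
}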
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static int __init init_reiserfs_fs(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
if ((ret = init_inodecache())) {
|
2005-04-17 06:20:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
reiserfs_proc_info_global_init();
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
ret = register_filesystem(&reiserfs_fs_type);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (ret == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
reiserfs_proc_info_global_done();
|
|
|
|
destroy_inodecache();
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
static void __exit exit_reiserfs_fs(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-07-13 11:21:28 +08:00
|
|
|
reiserfs_proc_info_global_done();
|
|
|
|
unregister_filesystem(&reiserfs_fs_type);
|
|
|
|
destroy_inodecache();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
struct file_system_type reiserfs_fs_type = {
|
2005-07-13 11:21:28 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.name = "reiserfs",
|
2010-07-25 04:46:55 +08:00
|
|
|
.mount = get_super_block,
|
2006-10-11 16:22:14 +08:00
|
|
|
.kill_sb = reiserfs_kill_sb,
|
2005-07-13 11:21:28 +08:00
|
|
|
.fs_flags = FS_REQUIRES_DEV,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
MODULE_DESCRIPTION("ReiserFS journaled filesystem");
|
|
|
|
MODULE_AUTHOR("Hans Reiser <reiser@namesys.com>");
|
|
|
|
MODULE_LICENSE("GPL");
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-07-13 11:21:28 +08:00
|
|
|
module_init(init_reiserfs_fs);
|
|
|
|
module_exit(exit_reiserfs_fs);
|