Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6: (31 commits)
  dquot: Detect partial write error to quota file in write_blk() and add printk_ratelimit for quota error messages
  ocfs2: Fix lock inversion in quotas during umount
  ocfs2: Use __dquot_transfer to avoid lock inversion
  ocfs2: Fix NULL pointer deref when writing local dquot
  ocfs2: Fix estimate of credits needed for quota allocation
  ocfs2: Fix quota locking
  ocfs2: Avoid unnecessary block mapping when refreshing quota info
  ocfs2: Do not map blocks from local quota file on each write
  quota: Refactor dquot_transfer code so that OCFS2 can pass in its references
  quota: unify quota init condition in setattr
  quota: remove sb_has_quota_active in get/set_info
  quota: unify ->set_dqblk
  quota: unify ->get_dqblk
  ext3: make barrier options consistent with ext4
  quota: Make quota stat accounting lockless.
  suppress warning: "quotatypes" defined but not used
  ext3: Fix waiting on transaction during fsync
  jbd: Provide function to check whether transaction will issue data barrier
  ufs: add ufs speciffic ->setattr call
  BKL: Remove BKL from ext2 filesystem
  ...
commit 7ce1418f95
@ -59,8 +59,19 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
Setting it to very large values will improve
performance.

barrier=1 This enables/disables barriers. barrier=0 disables
it, barrier=1 enables it.
barrier=<0(*)|1> This enables/disables the use of write barriers in
barrier the jbd code. barrier=0 disables, barrier=1 enables.
nobarrier (*) This also requires an IO stack which can support
barriers, and if jbd gets an error on a barrier
write, it will disable again with a warning.
Write barriers enforce proper on-disk ordering
of journal commits, making volatile disk write caches
safe to use, at some performance penalty. If
your disks are battery-backed in one way or another,
disabling barriers may safely improve performance.
The mount options "barrier" and "nobarrier" can
also be used to enable or disable barriers, for
consistency with other ext3 mount options.

orlov (*) This enables the new Orlov block allocator. It is
enabled by default.
@ -1331,6 +1331,12 @@ retry_alloc:
goto io_error;

free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
* skip this group (and avoid loading bitmap) if there
* are no free blocks
*/
if (!free_blocks)
continue;
/*
* skip this group if the number of
* free blocks is less than half of the reservation
@ -106,7 +106,7 @@ void ext2_free_inode (struct inode * inode)
|
|||
struct super_block * sb = inode->i_sb;
|
||||
int is_directory;
|
||||
unsigned long ino;
|
||||
struct buffer_head *bitmap_bh = NULL;
|
||||
struct buffer_head *bitmap_bh;
|
||||
unsigned long block_group;
|
||||
unsigned long bit;
|
||||
struct ext2_super_block * es;
|
||||
|
@ -135,14 +135,13 @@ void ext2_free_inode (struct inode * inode)
|
|||
ino > le32_to_cpu(es->s_inodes_count)) {
|
||||
ext2_error (sb, "ext2_free_inode",
|
||||
"reserved or nonexistent inode %lu", ino);
|
||||
goto error_return;
|
||||
return;
|
||||
}
|
||||
block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
|
||||
bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
|
||||
brelse(bitmap_bh);
|
||||
bitmap_bh = read_inode_bitmap(sb, block_group);
|
||||
if (!bitmap_bh)
|
||||
goto error_return;
|
||||
return;
|
||||
|
||||
/* Ok, now we can actually update the inode bitmaps.. */
|
||||
if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
|
||||
|
@ -154,7 +153,7 @@ void ext2_free_inode (struct inode * inode)
|
|||
mark_buffer_dirty(bitmap_bh);
|
||||
if (sb->s_flags & MS_SYNCHRONOUS)
|
||||
sync_dirty_buffer(bitmap_bh);
|
||||
error_return:
|
||||
|
||||
brelse(bitmap_bh);
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
* Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
|
||||
*/
|
||||
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/highuid.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
@ -1406,11 +1405,11 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
|
|||
/* If this is the first large file
|
||||
* created, add a flag to the superblock.
|
||||
*/
|
||||
lock_kernel();
|
||||
spin_lock(&EXT2_SB(sb)->s_lock);
|
||||
ext2_update_dynamic_rev(sb);
|
||||
EXT2_SET_RO_COMPAT_FEATURE(sb,
|
||||
EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
|
||||
unlock_kernel();
|
||||
spin_unlock(&EXT2_SB(sb)->s_lock);
|
||||
ext2_write_super(sb);
|
||||
}
|
||||
}
|
||||
|
@ -1467,7 +1466,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (iattr->ia_valid & ATTR_SIZE)
|
||||
if (is_quota_modification(inode, iattr))
|
||||
dquot_initialize(inode);
|
||||
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
|
||||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <linux/random.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/exportfs.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/vfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/mount.h>
|
||||
|
@ -39,7 +38,7 @@
|
|||
#include "xip.h"
|
||||
|
||||
static void ext2_sync_super(struct super_block *sb,
|
||||
struct ext2_super_block *es);
|
||||
struct ext2_super_block *es, int wait);
|
||||
static int ext2_remount (struct super_block * sb, int * flags, char * data);
|
||||
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
|
||||
static int ext2_sync_fs(struct super_block *sb, int wait);
|
||||
|
@ -52,9 +51,11 @@ void ext2_error (struct super_block * sb, const char * function,
|
|||
struct ext2_super_block *es = sbi->s_es;
|
||||
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
spin_lock(&sbi->s_lock);
|
||||
sbi->s_mount_state |= EXT2_ERROR_FS;
|
||||
es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
|
||||
ext2_sync_super(sb, es);
|
||||
spin_unlock(&sbi->s_lock);
|
||||
ext2_sync_super(sb, es, 1);
|
||||
}
|
||||
|
||||
va_start(args, fmt);
|
||||
|
@ -84,6 +85,9 @@ void ext2_msg(struct super_block *sb, const char *prefix,
|
|||
va_end(args);
|
||||
}
|
||||
|
||||
/*
|
||||
* This must be called with sbi->s_lock held.
|
||||
*/
|
||||
void ext2_update_dynamic_rev(struct super_block *sb)
|
||||
{
|
||||
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
|
||||
|
@ -115,8 +119,6 @@ static void ext2_put_super (struct super_block * sb)
|
|||
int i;
|
||||
struct ext2_sb_info *sbi = EXT2_SB(sb);
|
||||
|
||||
lock_kernel();
|
||||
|
||||
if (sb->s_dirt)
|
||||
ext2_write_super(sb);
|
||||
|
||||
|
@ -124,8 +126,10 @@ static void ext2_put_super (struct super_block * sb)
|
|||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
struct ext2_super_block *es = sbi->s_es;
|
||||
|
||||
spin_lock(&sbi->s_lock);
|
||||
es->s_state = cpu_to_le16(sbi->s_mount_state);
|
||||
ext2_sync_super(sb, es);
|
||||
spin_unlock(&sbi->s_lock);
|
||||
ext2_sync_super(sb, es, 1);
|
||||
}
|
||||
db_count = sbi->s_gdb_count;
|
||||
for (i = 0; i < db_count; i++)
|
||||
|
@ -140,8 +144,6 @@ static void ext2_put_super (struct super_block * sb)
|
|||
sb->s_fs_info = NULL;
|
||||
kfree(sbi->s_blockgroup_lock);
|
||||
kfree(sbi);
|
||||
|
||||
unlock_kernel();
|
||||
}
|
||||
|
||||
static struct kmem_cache * ext2_inode_cachep;
|
||||
|
@ -209,6 +211,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
|
|||
struct ext2_super_block *es = sbi->s_es;
|
||||
unsigned long def_mount_opts;
|
||||
|
||||
spin_lock(&sbi->s_lock);
|
||||
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
|
||||
|
||||
if (sbi->s_sb_block != 1)
|
||||
|
@ -281,6 +284,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
|
|||
if (!test_opt(sb, RESERVATION))
|
||||
seq_puts(seq, ",noreservation");
|
||||
|
||||
spin_unlock(&sbi->s_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -606,7 +610,6 @@ static int ext2_setup_super (struct super_block * sb,
|
|||
if (!le16_to_cpu(es->s_max_mnt_count))
|
||||
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
|
||||
le16_add_cpu(&es->s_mnt_count, 1);
|
||||
ext2_write_super(sb);
|
||||
if (test_opt (sb, DEBUG))
|
||||
ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
|
||||
"bpg=%lu, ipg=%lu, mo=%04lx]",
|
||||
|
@ -767,6 +770,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
|
|||
sb->s_fs_info = sbi;
|
||||
sbi->s_sb_block = sb_block;
|
||||
|
||||
spin_lock_init(&sbi->s_lock);
|
||||
|
||||
/*
|
||||
* See what the current blocksize for the device is, and
|
||||
* use that as the blocksize. Otherwise (or if the blocksize
|
||||
|
@ -1079,7 +1084,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
|
|||
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
|
||||
ext2_msg(sb, KERN_WARNING,
|
||||
"warning: mounting ext3 filesystem as ext2");
|
||||
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
|
||||
if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
|
||||
sb->s_flags |= MS_RDONLY;
|
||||
ext2_write_super(sb);
|
||||
return 0;
|
||||
|
||||
cantfind_ext2:
|
||||
|
@ -1120,30 +1127,26 @@ static void ext2_clear_super_error(struct super_block *sb)
|
|||
* be remapped. Nothing we can do but to retry the
|
||||
* write and hope for the best.
|
||||
*/
|
||||
printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
|
||||
"superblock detected", sb->s_id);
|
||||
ext2_msg(sb, KERN_ERR,
|
||||
"previous I/O error to superblock detected\n");
|
||||
clear_buffer_write_io_error(sbh);
|
||||
set_buffer_uptodate(sbh);
|
||||
}
|
||||
}
|
||||
|
||||
static void ext2_commit_super (struct super_block * sb,
|
||||
struct ext2_super_block * es)
|
||||
{
|
||||
ext2_clear_super_error(sb);
|
||||
es->s_wtime = cpu_to_le32(get_seconds());
|
||||
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
|
||||
sb->s_dirt = 0;
|
||||
}
|
||||
|
||||
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
|
||||
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
|
||||
int wait)
|
||||
{
|
||||
ext2_clear_super_error(sb);
|
||||
spin_lock(&EXT2_SB(sb)->s_lock);
|
||||
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
|
||||
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
|
||||
es->s_wtime = cpu_to_le32(get_seconds());
|
||||
/* unlock before we do IO */
|
||||
spin_unlock(&EXT2_SB(sb)->s_lock);
|
||||
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
|
||||
sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
|
||||
if (wait)
|
||||
sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
|
||||
sb->s_dirt = 0;
|
||||
}
|
||||
|
||||
|
@ -1157,43 +1160,18 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
|
|||
* may have been checked while mounted and e2fsck may have
|
||||
* set s_state to EXT2_VALID_FS after some corrections.
|
||||
*/
|
||||
|
||||
static int ext2_sync_fs(struct super_block *sb, int wait)
|
||||
{
|
||||
struct ext2_sb_info *sbi = EXT2_SB(sb);
|
||||
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
|
||||
struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
|
||||
|
||||
lock_kernel();
|
||||
if (buffer_write_io_error(sbh)) {
|
||||
/*
|
||||
* Oh, dear. A previous attempt to write the
|
||||
* superblock failed. This could happen because the
|
||||
* USB device was yanked out. Or it could happen to
|
||||
* be a transient write error and maybe the block will
|
||||
* be remapped. Nothing we can do but to retry the
|
||||
* write and hope for the best.
|
||||
*/
|
||||
ext2_msg(sb, KERN_ERR,
|
||||
"previous I/O error to superblock detected\n");
|
||||
clear_buffer_write_io_error(sbh);
|
||||
set_buffer_uptodate(sbh);
|
||||
}
|
||||
|
||||
spin_lock(&sbi->s_lock);
|
||||
if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
|
||||
ext2_debug("setting valid to 0\n");
|
||||
es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
|
||||
es->s_free_blocks_count =
|
||||
cpu_to_le32(ext2_count_free_blocks(sb));
|
||||
es->s_free_inodes_count =
|
||||
cpu_to_le32(ext2_count_free_inodes(sb));
|
||||
es->s_mtime = cpu_to_le32(get_seconds());
|
||||
ext2_sync_super(sb, es);
|
||||
} else {
|
||||
ext2_commit_super(sb, es);
|
||||
}
|
||||
sb->s_dirt = 0;
|
||||
unlock_kernel();
|
||||
|
||||
spin_unlock(&sbi->s_lock);
|
||||
ext2_sync_super(sb, es, wait);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1215,7 +1193,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
|
|||
unsigned long old_sb_flags;
|
||||
int err;
|
||||
|
||||
lock_kernel();
|
||||
spin_lock(&sbi->s_lock);
|
||||
|
||||
/* Store the old options */
|
||||
old_sb_flags = sb->s_flags;
|
||||
|
@ -1254,13 +1232,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
|
|||
sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
|
||||
}
|
||||
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
|
||||
unlock_kernel();
|
||||
spin_unlock(&sbi->s_lock);
|
||||
return 0;
|
||||
}
|
||||
if (*flags & MS_RDONLY) {
|
||||
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
|
||||
!(sbi->s_mount_state & EXT2_VALID_FS)) {
|
||||
unlock_kernel();
|
||||
spin_unlock(&sbi->s_lock);
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
|
@ -1269,6 +1247,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
|
|||
*/
|
||||
es->s_state = cpu_to_le16(sbi->s_mount_state);
|
||||
es->s_mtime = cpu_to_le32(get_seconds());
|
||||
spin_unlock(&sbi->s_lock);
|
||||
ext2_sync_super(sb, es, 1);
|
||||
} else {
|
||||
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
|
||||
~EXT2_FEATURE_RO_COMPAT_SUPP);
|
||||
|
@ -1288,16 +1268,16 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
|
|||
sbi->s_mount_state = le16_to_cpu(es->s_state);
|
||||
if (!ext2_setup_super (sb, es, 0))
|
||||
sb->s_flags &= ~MS_RDONLY;
|
||||
spin_unlock(&sbi->s_lock);
|
||||
ext2_write_super(sb);
|
||||
}
|
||||
ext2_sync_super(sb, es);
|
||||
unlock_kernel();
|
||||
return 0;
|
||||
restore_opts:
|
||||
sbi->s_mount_opt = old_opts.s_mount_opt;
|
||||
sbi->s_resuid = old_opts.s_resuid;
|
||||
sbi->s_resgid = old_opts.s_resgid;
|
||||
sb->s_flags = old_sb_flags;
|
||||
unlock_kernel();
|
||||
spin_unlock(&sbi->s_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1308,6 +1288,8 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
|
|||
struct ext2_super_block *es = sbi->s_es;
|
||||
u64 fsid;
|
||||
|
||||
spin_lock(&sbi->s_lock);
|
||||
|
||||
if (test_opt (sb, MINIX_DF))
|
||||
sbi->s_overhead_last = 0;
|
||||
else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
|
||||
|
@ -1362,6 +1344,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
|
|||
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
|
||||
buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
|
||||
buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
|
||||
spin_unlock(&sbi->s_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -345,7 +345,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
|
|||
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
|
||||
return;
|
||||
|
||||
spin_lock(&EXT2_SB(sb)->s_lock);
|
||||
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
|
||||
spin_unlock(&EXT2_SB(sb)->s_lock);
|
||||
sb->s_dirt = 1;
|
||||
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
|
||||
}
|
||||
|
|
|
@ -1583,6 +1583,12 @@ retry_alloc:
if (!gdp)
goto io_error;
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
* skip this group (and avoid loading bitmap) if there
* are no free blocks
*/
if (!free_blocks)
continue;
/*
* skip this group if the number of
* free blocks is less than half of the reservation
@ -48,7 +48,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
struct inode *inode = dentry->d_inode;
struct ext3_inode_info *ei = EXT3_I(inode);
journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
int ret = 0;
int ret, needs_barrier = 0;
tid_t commit_tid;

if (inode->i_sb->s_flags & MS_RDONLY)

@ -70,28 +70,26 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
if (ext3_should_journal_data(inode)) {
ret = ext3_force_commit(inode->i_sb);
goto out;
}
if (ext3_should_journal_data(inode))
return ext3_force_commit(inode->i_sb);

if (datasync)
commit_tid = atomic_read(&ei->i_datasync_tid);
else
commit_tid = atomic_read(&ei->i_sync_tid);

if (log_start_commit(journal, commit_tid)) {
log_wait_commit(journal, commit_tid);
goto out;
}
if (test_opt(inode->i_sb, BARRIER) &&
!journal_trans_will_send_data_barrier(journal, commit_tid))
needs_barrier = 1;
log_start_commit(journal, commit_tid);
ret = log_wait_commit(journal, commit_tid);

/*
* In case we didn't commit a transaction, we have to flush
* disk caches manually so that data really is on persistent
* storage
*/
if (test_opt(inode->i_sb, BARRIER))
if (needs_barrier)
blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
out:
return ret;
}
@ -3151,7 +3151,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (ia_valid & ATTR_SIZE)
|
||||
if (is_quota_modification(inode, attr))
|
||||
dquot_initialize(inode);
|
||||
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
|
||||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
|
||||
|
|
|
@ -653,8 +653,12 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
if (test_opt(sb, BARRIER))
seq_puts(seq, ",barrier=1");

/*
* Always display barrier state so it's clear what the status is.
*/
seq_puts(seq, ",barrier=");
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
@ -810,8 +814,8 @@ enum {
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
Opt_usrquota, Opt_grpquota
Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
Opt_resize, Opt_usrquota, Opt_grpquota
};

static const match_table_t tokens = {
@ -865,6 +869,8 @@ static const match_table_t tokens = {
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_resize, "resize"},
{Opt_err, NULL},
};
@ -967,7 +973,11 @@ static int parse_options (char *options, struct super_block *sb,
int token;
if (!*p)
continue;

/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@ -1215,9 +1225,15 @@ set_qf_format:
case Opt_abort:
set_opt(sbi->s_mount_opt, ABORT);
break;
case Opt_nobarrier:
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_barrier:
if (match_int(&args[0], &option))
return 0;
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
@ -1890,21 +1906,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
|
|||
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
|
||||
spin_lock_init(&sbi->s_next_gen_lock);
|
||||
|
||||
err = percpu_counter_init(&sbi->s_freeblocks_counter,
|
||||
ext3_count_free_blocks(sb));
|
||||
if (!err) {
|
||||
err = percpu_counter_init(&sbi->s_freeinodes_counter,
|
||||
ext3_count_free_inodes(sb));
|
||||
}
|
||||
if (!err) {
|
||||
err = percpu_counter_init(&sbi->s_dirs_counter,
|
||||
ext3_count_dirs(sb));
|
||||
}
|
||||
if (err) {
|
||||
ext3_msg(sb, KERN_ERR, "error: insufficient memory");
|
||||
goto failed_mount3;
|
||||
}
|
||||
|
||||
/* per fileystem reservation list head & lock */
|
||||
spin_lock_init(&sbi->s_rsv_window_lock);
|
||||
sbi->s_rsv_window_root = RB_ROOT;
|
||||
|
@ -1945,15 +1946,29 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
|
|||
if (!test_opt(sb, NOLOAD) &&
|
||||
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
|
||||
if (ext3_load_journal(sb, es, journal_devnum))
|
||||
goto failed_mount3;
|
||||
goto failed_mount2;
|
||||
} else if (journal_inum) {
|
||||
if (ext3_create_journal(sb, es, journal_inum))
|
||||
goto failed_mount3;
|
||||
goto failed_mount2;
|
||||
} else {
|
||||
if (!silent)
|
||||
ext3_msg(sb, KERN_ERR,
|
||||
"error: no journal found. "
|
||||
"mounting ext3 over ext2?");
|
||||
goto failed_mount2;
|
||||
}
|
||||
err = percpu_counter_init(&sbi->s_freeblocks_counter,
|
||||
ext3_count_free_blocks(sb));
|
||||
if (!err) {
|
||||
err = percpu_counter_init(&sbi->s_freeinodes_counter,
|
||||
ext3_count_free_inodes(sb));
|
||||
}
|
||||
if (!err) {
|
||||
err = percpu_counter_init(&sbi->s_dirs_counter,
|
||||
ext3_count_dirs(sb));
|
||||
}
|
||||
if (err) {
|
||||
ext3_msg(sb, KERN_ERR, "error: insufficient memory");
|
||||
goto failed_mount3;
|
||||
}
|
||||
|
||||
|
@ -1978,7 +1993,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
|
|||
ext3_msg(sb, KERN_ERR,
|
||||
"error: journal does not support "
|
||||
"requested data journaling mode");
|
||||
goto failed_mount4;
|
||||
goto failed_mount3;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
|
@ -2001,19 +2016,19 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
|
|||
if (IS_ERR(root)) {
|
||||
ext3_msg(sb, KERN_ERR, "error: get root inode failed");
|
||||
ret = PTR_ERR(root);
|
||||
goto failed_mount4;
|
||||
goto failed_mount3;
|
||||
}
|
||||
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
|
||||
iput(root);
|
||||
ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
|
||||
goto failed_mount4;
|
||||
goto failed_mount3;
|
||||
}
|
||||
sb->s_root = d_alloc_root(root);
|
||||
if (!sb->s_root) {
|
||||
ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
|
||||
iput(root);
|
||||
ret = -ENOMEM;
|
||||
goto failed_mount4;
|
||||
goto failed_mount3;
|
||||
}
|
||||
|
||||
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
|
||||
|
@ -2039,12 +2054,11 @@ cantfind_ext3:
|
|||
sb->s_id);
|
||||
goto failed_mount;
|
||||
|
||||
failed_mount4:
|
||||
journal_destroy(sbi->s_journal);
|
||||
failed_mount3:
|
||||
percpu_counter_destroy(&sbi->s_freeblocks_counter);
|
||||
percpu_counter_destroy(&sbi->s_freeinodes_counter);
|
||||
percpu_counter_destroy(&sbi->s_dirs_counter);
|
||||
journal_destroy(sbi->s_journal);
|
||||
failed_mount2:
|
||||
for (i = 0; i < db_count; i++)
|
||||
brelse(sbi->s_group_desc[i]);
|
||||
|
@ -2278,6 +2292,9 @@ static int ext3_load_journal(struct super_block *sb,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!(journal->j_flags & JFS_BARRIER))
|
||||
printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
|
||||
|
||||
if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
|
||||
err = journal_update_format(journal);
|
||||
if (err) {
|
||||
|
|
|
@ -5425,7 +5425,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (ia_valid & ATTR_SIZE)
|
||||
if (is_quota_modification(inode, attr))
|
||||
dquot_initialize(inode);
|
||||
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
|
||||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
|
||||
|
|
|
@ -1476,8 +1476,8 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
|
||||
struct fs_disk_quota *fdq)
|
||||
static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
|
||||
struct fs_disk_quota *fdq)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
struct gfs2_quota_lvb *qlvb;
|
||||
|
@ -1521,8 +1521,8 @@ out:
|
|||
/* GFS2 only supports a subset of the XFS fields */
|
||||
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
|
||||
|
||||
static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
|
||||
struct fs_disk_quota *fdq)
|
||||
static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
|
||||
struct fs_disk_quota *fdq)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
|
||||
|
@ -1629,7 +1629,7 @@ out_put:
|
|||
const struct quotactl_ops gfs2_quotactl_ops = {
|
||||
.quota_sync = gfs2_quota_sync,
|
||||
.get_xstate = gfs2_quota_get_xstate,
|
||||
.get_xquota = gfs2_xquota_get,
|
||||
.set_xquota = gfs2_xquota_set,
|
||||
.get_dqblk = gfs2_get_dqblk,
|
||||
.set_dqblk = gfs2_set_dqblk,
|
||||
};
|
||||
|
||||
|
|
|
@ -786,6 +786,12 @@ wait_for_iobuf:
|
|||
|
||||
jbd_debug(3, "JBD: commit phase 6\n");
|
||||
|
||||
/* All metadata is written, now write commit record and do cleanup */
|
||||
spin_lock(&journal->j_state_lock);
|
||||
J_ASSERT(commit_transaction->t_state == T_COMMIT);
|
||||
commit_transaction->t_state = T_COMMIT_RECORD;
|
||||
spin_unlock(&journal->j_state_lock);
|
||||
|
||||
if (journal_write_commit_record(journal, commit_transaction))
|
||||
err = -EIO;
|
||||
|
||||
|
@ -923,7 +929,7 @@ restart_loop:
|
|||
|
||||
jbd_debug(3, "JBD: commit phase 8\n");
|
||||
|
||||
J_ASSERT(commit_transaction->t_state == T_COMMIT);
|
||||
J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
|
||||
|
||||
commit_transaction->t_state = T_FINISHED;
|
||||
J_ASSERT(commit_transaction == journal->j_committing_transaction);
|
||||
|
|
|
@ -564,6 +564,38 @@ int log_wait_commit(journal_t *journal, tid_t tid)
return err;
}

/*
* Return 1 if a given transaction has not yet sent barrier request
* connected with a transaction commit. If 0 is returned, transaction
* may or may not have sent the barrier. Used to avoid sending barrier
* twice in common cases.
*/
int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
int ret = 0;
transaction_t *commit_trans;

if (!(journal->j_flags & JFS_BARRIER))
return 0;
spin_lock(&journal->j_state_lock);
/* Transaction already committed? */
if (tid_geq(journal->j_commit_sequence, tid))
goto out;
/*
* Transaction is being committed and we already proceeded to
* writing commit record?
*/
commit_trans = journal->j_committing_transaction;
if (commit_trans && commit_trans->t_tid == tid &&
commit_trans->t_state >= T_COMMIT_RECORD)
goto out;
ret = 1;
out:
spin_unlock(&journal->j_state_lock);
return ret;
}
EXPORT_SYMBOL(journal_trans_will_send_data_barrier);

/*
* Log buffer allocation routines:
*/
@ -1157,6 +1189,7 @@ int journal_destroy(journal_t *journal)
|
|||
{
|
||||
int err = 0;
|
||||
|
||||
|
||||
/* Wait for the commit thread to wake up and die. */
|
||||
journal_kill_thread(journal);
|
||||
|
||||
|
|
|
@ -98,7 +98,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (iattr->ia_valid & ATTR_SIZE)
|
||||
if (is_quota_modification(inode, iattr))
|
||||
dquot_initialize(inode);
|
||||
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
|
||||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
|
||||
|
|
|
@ -3897,7 +3897,8 @@ static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
|
|||
oinfo->dqi_gi.dqi_free_entry =
|
||||
be32_to_cpu(lvb->lvb_free_entry);
|
||||
} else {
|
||||
status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh);
|
||||
status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
|
||||
oinfo->dqi_giblk, &bh);
|
||||
if (status) {
|
||||
mlog_errno(status);
|
||||
goto bail;
|
||||
|
|
|
@ -933,9 +933,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
struct ocfs2_super *osb = OCFS2_SB(sb);
|
||||
struct buffer_head *bh = NULL;
|
||||
handle_t *handle = NULL;
|
||||
int qtype;
|
||||
struct dquot *transfer_from[MAXQUOTAS] = { };
|
||||
struct dquot *transfer_to[MAXQUOTAS] = { };
|
||||
int qtype;
|
||||
|
||||
mlog_entry("(0x%p, '%.*s')\n", dentry,
|
||||
dentry->d_name.len, dentry->d_name.name);
|
||||
|
@ -966,10 +965,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
if (status)
|
||||
return status;
|
||||
|
||||
if (is_quota_modification(inode, attr))
|
||||
dquot_initialize(inode);
|
||||
size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
|
||||
if (size_change) {
|
||||
dquot_initialize(inode);
|
||||
|
||||
status = ocfs2_rw_lock(inode, 1);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
|
@ -1019,9 +1018,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
|
||||
transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
|
||||
USRQUOTA);
|
||||
transfer_from[USRQUOTA] = dqget(sb, inode->i_uid,
|
||||
USRQUOTA);
|
||||
if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
|
||||
if (!transfer_to[USRQUOTA]) {
|
||||
status = -ESRCH;
|
||||
goto bail_unlock;
|
||||
}
|
||||
|
@ -1031,9 +1028,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
|
||||
transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
|
||||
GRPQUOTA);
|
||||
transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid,
|
||||
GRPQUOTA);
|
||||
if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
|
||||
if (!transfer_to[GRPQUOTA]) {
|
||||
status = -ESRCH;
|
||||
goto bail_unlock;
|
||||
}
|
||||
|
@ -1045,7 +1040,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
mlog_errno(status);
|
||||
goto bail_unlock;
|
||||
}
|
||||
status = dquot_transfer(inode, attr);
|
||||
status = __dquot_transfer(inode, transfer_to);
|
||||
if (status < 0)
|
||||
goto bail_commit;
|
||||
} else {
|
||||
|
@ -1085,10 +1080,8 @@ bail:
|
|||
brelse(bh);
|
||||
|
||||
/* Release quota pointers in case we acquired them */
|
||||
for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
|
||||
for (qtype = 0; qtype < MAXQUOTAS; qtype++)
|
||||
dqput(transfer_to[qtype]);
|
||||
dqput(transfer_from[qtype]);
|
||||
}
|
||||
|
||||
if (!status && attr->ia_valid & ATTR_MODE) {
|
||||
status = ocfs2_acl_chmod(inode);
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
struct ocfs2_dquot {
|
||||
struct dquot dq_dquot; /* Generic VFS dquot */
|
||||
loff_t dq_local_off; /* Offset in the local quota file */
|
||||
u64 dq_local_phys_blk; /* Physical block carrying quota structure */
|
||||
struct ocfs2_quota_chunk *dq_chunk; /* Chunk dquot is in */
|
||||
unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */
|
||||
s64 dq_origspace; /* Last globally synced space usage */
|
||||
|
@ -51,8 +52,9 @@ struct ocfs2_mem_dqinfo {
|
|||
struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */
|
||||
struct buffer_head *dqi_gqi_bh; /* Buffer head with global quota file inode - set only if inode lock is obtained */
|
||||
int dqi_gqi_count; /* Number of holders of dqi_gqi_bh */
|
||||
u64 dqi_giblk; /* Number of block with global information header */
|
||||
struct buffer_head *dqi_lqi_bh; /* Buffer head with local quota file inode */
|
||||
struct buffer_head *dqi_ibh; /* Buffer with information header */
|
||||
struct buffer_head *dqi_libh; /* Buffer with local information header */
|
||||
struct qtree_mem_dqinfo dqi_gi; /* Info about global file */
|
||||
struct delayed_work dqi_sync_work; /* Work for syncing dquots */
|
||||
struct ocfs2_quota_recovery *dqi_rec; /* Pointer to recovery
|
||||
|
@ -102,8 +104,12 @@ static inline int ocfs2_global_release_dquot(struct dquot *dquot)
|
|||
|
||||
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
|
||||
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
|
||||
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
|
||||
struct buffer_head **bh);
|
||||
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh);
|
||||
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
|
||||
struct buffer_head **bh);
|
||||
int ocfs2_create_local_dquot(struct dquot *dquot);
|
||||
int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot);
|
||||
int ocfs2_local_write_dquot(struct dquot *dquot);
|
||||
|
||||
extern const struct dquot_operations ocfs2_quota_operations;
|
||||
extern struct quota_format_type ocfs2_quota_format;
|
||||
|
|
|
@ -25,8 +25,44 @@
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"

/*
* Locking of quotas with OCFS2 is rather complex. Here are rules that
* should be obeyed by all the functions:
* - any write of quota structure (either to local or global file) is protected
* by dqio_mutex or dquot->dq_lock.
* - any modification of global quota file holds inode cluster lock, i_mutex,
* and ip_alloc_sem of the global quota file (achieved by
* ocfs2_lock_global_qf). It also has to hold qinfo_lock.
* - an allocation of new blocks for local quota file is protected by
* its ip_alloc_sem
*
* A rough sketch of locking dependencies (lf = local file, gf = global file):
* Normal filesystem operation:
* start_trans -> dqio_mutex -> write to lf
* Syncing of local and global file:
* ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
* write to gf
* -> write to lf
* Acquire dquot for the first time:
* dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
* -> alloc space for gf
* -> start_trans -> qinfo_lock -> write to gf
* -> ip_alloc_sem of lf -> alloc space for lf
* -> write to lf
* Release last reference to dquot:
* dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
* -> write to lf
* Note that all the above operations also hold the inode cluster lock of lf.
* Recovery:
* inode cluster lock of recovered lf
* -> read bitmaps -> ip_alloc_sem of lf
* -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
* write to gf
*/

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
@ -91,8 +127,7 @@ struct qtree_fmt_operations ocfs2_global_ops = {
|
|||
.is_id = ocfs2_global_is_id,
|
||||
};
|
||||
|
||||
static int ocfs2_validate_quota_block(struct super_block *sb,
|
||||
struct buffer_head *bh)
|
||||
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
|
||||
{
|
||||
struct ocfs2_disk_dqtrailer *dqt =
|
||||
ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
|
||||
|
@ -110,54 +145,19 @@ static int ocfs2_validate_quota_block(struct super_block *sb,
|
|||
return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
|
||||
}
|
||||
|
||||
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
|
||||
struct buffer_head **bh)
|
||||
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
|
||||
struct buffer_head **bhp)
|
||||
{
|
||||
int rc = 0;
|
||||
struct buffer_head *tmp = *bh;
|
||||
int rc;
|
||||
|
||||
if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
|
||||
ocfs2_error(inode->i_sb,
|
||||
"Quota file %llu is probably corrupted! Requested "
|
||||
"to read block %Lu but file has size only %Lu\n",
|
||||
(unsigned long long)OCFS2_I(inode)->ip_blkno,
|
||||
(unsigned long long)v_block,
|
||||
(unsigned long long)i_size_read(inode));
|
||||
return -EIO;
|
||||
}
|
||||
rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
|
||||
ocfs2_validate_quota_block);
|
||||
*bhp = NULL;
|
||||
rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
|
||||
ocfs2_validate_quota_block);
|
||||
if (rc)
|
||||
mlog_errno(rc);
|
||||
|
||||
/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
|
||||
if (!rc && !*bh)
|
||||
*bh = tmp;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int ocfs2_get_quota_block(struct inode *inode, int block,
|
||||
struct buffer_head **bh)
|
||||
{
|
||||
u64 pblock, pcount;
|
||||
int err;
|
||||
|
||||
down_read(&OCFS2_I(inode)->ip_alloc_sem);
|
||||
err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
|
||||
up_read(&OCFS2_I(inode)->ip_alloc_sem);
|
||||
if (err) {
|
||||
mlog_errno(err);
|
||||
return err;
|
||||
}
|
||||
*bh = sb_getblk(inode->i_sb, pblock);
|
||||
if (!*bh) {
|
||||
err = -EIO;
|
||||
mlog_errno(err);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Read data from global quotafile - avoid pagecache and such because we cannot
|
||||
* afford acquiring the locks... We use quota cluster lock to serialize
|
||||
* operations. Caller is responsible for acquiring it. */
|
||||
|
@ -172,6 +172,7 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
|
|||
int err = 0;
|
||||
struct buffer_head *bh;
|
||||
size_t toread, tocopy;
|
||||
u64 pblock = 0, pcount = 0;
|
||||
|
||||
if (off > i_size)
|
||||
return 0;
|
||||
|
@ -180,8 +181,19 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
|
|||
toread = len;
|
||||
while (toread > 0) {
|
||||
tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
|
||||
if (!pcount) {
|
||||
err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
|
||||
&pcount, NULL);
|
||||
if (err) {
|
||||
mlog_errno(err);
|
||||
return err;
|
||||
}
|
||||
} else {
|
||||
pcount--;
|
||||
pblock++;
|
||||
}
|
||||
bh = NULL;
|
||||
err = ocfs2_read_quota_block(gqinode, blk, &bh);
|
||||
err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
|
||||
if (err) {
|
||||
mlog_errno(err);
|
||||
return err;
|
||||
|
@ -209,6 +221,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
|
|||
int err = 0, new = 0, ja_type;
|
||||
struct buffer_head *bh = NULL;
|
||||
handle_t *handle = journal_current_handle();
|
||||
u64 pblock, pcount;
|
||||
|
||||
if (!handle) {
|
||||
mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
|
||||
|
@ -221,12 +234,11 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
|
|||
len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
|
||||
}
|
||||
|
||||
mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
|
||||
if (gqinode->i_size < off + len) {
|
||||
loff_t rounded_end =
|
||||
ocfs2_align_bytes_to_blocks(sb, off + len);
|
||||
|
||||
/* Space is already allocated in ocfs2_global_read_dquot() */
|
||||
/* Space is already allocated in ocfs2_acquire_dquot() */
|
||||
err = ocfs2_simple_size_update(gqinode,
|
||||
oinfo->dqi_gqi_bh,
|
||||
rounded_end);
|
||||
|
@ -234,13 +246,20 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
|
|||
goto out;
|
||||
new = 1;
|
||||
}
|
||||
err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
|
||||
if (err) {
|
||||
mlog_errno(err);
|
||||
goto out;
|
||||
}
|
||||
/* Not rewriting whole block? */
|
||||
if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
|
||||
!new) {
|
||||
err = ocfs2_read_quota_block(gqinode, blk, &bh);
|
||||
err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
|
||||
ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
|
||||
} else {
|
||||
err = ocfs2_get_quota_block(gqinode, blk, &bh);
|
||||
bh = sb_getblk(sb, pblock);
|
||||
if (!bh)
|
||||
err = -ENOMEM;
|
||||
ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
|
||||
}
|
||||
if (err) {
|
||||
|
@ -265,13 +284,11 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
|
|||
brelse(bh);
|
||||
out:
|
||||
if (err) {
|
||||
mutex_unlock(&gqinode->i_mutex);
|
||||
mlog_errno(err);
|
||||
return err;
|
||||
}
|
||||
gqinode->i_version++;
|
||||
ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
|
||||
mutex_unlock(&gqinode->i_mutex);
|
||||
return len;
|
||||
}
|
||||
|
||||
|
@ -289,11 +306,23 @@ int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
|
|||
else
|
||||
WARN_ON(bh != oinfo->dqi_gqi_bh);
|
||||
spin_unlock(&dq_data_lock);
|
||||
if (ex) {
|
||||
mutex_lock(&oinfo->dqi_gqinode->i_mutex);
|
||||
down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
|
||||
} else {
|
||||
down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
|
||||
{
|
||||
if (ex) {
|
||||
up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
|
||||
mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
|
||||
} else {
|
||||
up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
|
||||
}
|
||||
ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
|
||||
brelse(oinfo->dqi_gqi_bh);
|
||||
spin_lock(&dq_data_lock);
|
||||
|
@ -311,6 +340,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
|
|||
struct ocfs2_global_disk_dqinfo dinfo;
|
||||
struct mem_dqinfo *info = sb_dqinfo(sb, type);
|
||||
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
|
||||
u64 pcount;
|
||||
int status;
|
||||
|
||||
mlog_entry_void();
|
||||
|
@ -337,9 +367,19 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
|
|||
mlog_errno(status);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
|
||||
&pcount, NULL);
|
||||
if (status < 0)
|
||||
goto out_unlock;
|
||||
|
||||
status = ocfs2_qinfo_lock(oinfo, 0);
|
||||
if (status < 0)
|
||||
goto out_unlock;
|
||||
status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
|
||||
sizeof(struct ocfs2_global_disk_dqinfo),
|
||||
OCFS2_GLOBAL_INFO_OFF);
|
||||
ocfs2_qinfo_unlock(oinfo, 0);
|
||||
ocfs2_unlock_global_qf(oinfo, 0);
|
||||
if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
|
||||
mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
|
||||
|
@ -366,6 +406,10 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
|
|||
out_err:
|
||||
mlog_exit(status);
|
||||
return status;
|
||||
out_unlock:
|
||||
ocfs2_unlock_global_qf(oinfo, 0);
|
||||
mlog_errno(status);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Write information to global quota file. Expects exlusive lock on quota
|
||||
|
@ -424,78 +468,10 @@ static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
|
|||
|
||||
static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
|
||||
{
|
||||
/* We modify all the allocated blocks, tree root, and info block */
|
||||
/* We modify all the allocated blocks, tree root, info block and
|
||||
* the inode */
|
||||
return (ocfs2_global_qinit_alloc(sb, type) + 2) *
|
||||
OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
|
||||
}
|
||||
|
||||
/* Read in information from global quota file and acquire a reference to it.
|
||||
* dquot_acquire() has already started the transaction and locked quota file */
|
||||
int ocfs2_global_read_dquot(struct dquot *dquot)
|
||||
{
|
||||
int err, err2, ex = 0;
|
||||
struct super_block *sb = dquot->dq_sb;
|
||||
int type = dquot->dq_type;
|
||||
struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
|
||||
struct ocfs2_super *osb = OCFS2_SB(sb);
|
||||
struct inode *gqinode = info->dqi_gqinode;
|
||||
int need_alloc = ocfs2_global_qinit_alloc(sb, type);
|
||||
handle_t *handle = NULL;
|
||||
|
||||
err = ocfs2_qinfo_lock(info, 0);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
err = qtree_read_dquot(&info->dqi_gi, dquot);
|
||||
if (err < 0)
|
||||
goto out_qlock;
|
||||
OCFS2_DQUOT(dquot)->dq_use_count++;
|
||||
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
|
||||
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
|
||||
ocfs2_qinfo_unlock(info, 0);
|
||||
|
||||
if (!dquot->dq_off) { /* No real quota entry? */
|
||||
ex = 1;
|
||||
/*
|
||||
* Add blocks to quota file before we start a transaction since
|
||||
* locking allocators ranks above a transaction start
|
||||
*/
|
||||
WARN_ON(journal_current_handle());
|
||||
down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
|
||||
err = ocfs2_extend_no_holes(gqinode,
|
||||
gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
|
||||
gqinode->i_size);
|
||||
up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
handle = ocfs2_start_trans(osb,
|
||||
ocfs2_calc_global_qinit_credits(sb, type));
|
||||
if (IS_ERR(handle)) {
|
||||
err = PTR_ERR(handle);
|
||||
goto out;
|
||||
}
|
||||
err = ocfs2_qinfo_lock(info, ex);
|
||||
if (err < 0)
|
||||
goto out_trans;
|
||||
err = qtree_write_dquot(&info->dqi_gi, dquot);
|
||||
if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
|
||||
err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
|
||||
if (!err)
|
||||
err = err2;
|
||||
}
|
||||
out_qlock:
|
||||
if (ex)
|
||||
ocfs2_qinfo_unlock(info, 1);
|
||||
else
|
||||
ocfs2_qinfo_unlock(info, 0);
|
||||
out_trans:
|
||||
if (handle)
|
||||
ocfs2_commit_trans(osb, handle);
|
||||
out:
|
||||
if (err < 0)
|
||||
mlog_errno(err);
|
||||
return err;
|
||||
OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
|
||||
}
|
||||
|
||||
/* Sync local information about quota modifications with global quota file.
|
||||
|
@ -636,14 +612,13 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
|
|||
}
|
||||
mutex_lock(&sb_dqopt(sb)->dqio_mutex);
|
||||
status = ocfs2_sync_dquot(dquot);
|
||||
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
|
||||
if (status < 0)
|
||||
mlog_errno(status);
|
||||
/* We have to write local structure as well... */
|
||||
dquot_mark_dquot_dirty(dquot);
|
||||
status = dquot_commit(dquot);
|
||||
status = ocfs2_local_write_dquot(dquot);
|
||||
if (status < 0)
|
||||
mlog_errno(status);
|
||||
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
|
||||
ocfs2_commit_trans(osb, handle);
|
||||
out_ilock:
|
||||
ocfs2_unlock_global_qf(oinfo, 1);
|
||||
|
@ -682,7 +657,9 @@ static int ocfs2_write_dquot(struct dquot *dquot)
|
|||
mlog_errno(status);
|
||||
goto out;
|
||||
}
|
||||
status = dquot_commit(dquot);
|
||||
mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
|
||||
status = ocfs2_local_write_dquot(dquot);
|
||||
mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
|
||||
ocfs2_commit_trans(osb, handle);
|
||||
out:
|
||||
mlog_exit(status);
|
||||
|
@ -713,6 +690,10 @@ static int ocfs2_release_dquot(struct dquot *dquot)
|
|||
|
||||
mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
|
||||
|
||||
mutex_lock(&dquot->dq_lock);
|
||||
/* Check whether we are not racing with some other dqget() */
|
||||
if (atomic_read(&dquot->dq_count) > 1)
|
||||
goto out;
|
||||
status = ocfs2_lock_global_qf(oinfo, 1);
|
||||
if (status < 0)
|
||||
goto out;
|
||||
|
@ -723,30 +704,113 @@ static int ocfs2_release_dquot(struct dquot *dquot)
|
|||
mlog_errno(status);
|
||||
goto out_ilock;
|
||||
}
|
||||
status = dquot_release(dquot);
|
||||
|
||||
status = ocfs2_global_release_dquot(dquot);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out_trans;
|
||||
}
|
||||
status = ocfs2_local_release_dquot(handle, dquot);
|
||||
/*
|
||||
* If we fail here, we cannot do much as global structure is
|
||||
* already released. So just complain...
|
||||
*/
|
||||
if (status < 0)
|
||||
mlog_errno(status);
|
||||
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
|
||||
out_trans:
|
||||
ocfs2_commit_trans(osb, handle);
|
||||
out_ilock:
|
||||
ocfs2_unlock_global_qf(oinfo, 1);
|
||||
out:
|
||||
mutex_unlock(&dquot->dq_lock);
|
||||
mlog_exit(status);
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read global dquot structure from disk or create it if it does
|
||||
* not exist. Also update use count of the global structure and
|
||||
* create structure in node-local quota file.
|
||||
*/
|
||||
static int ocfs2_acquire_dquot(struct dquot *dquot)
|
||||
{
|
||||
struct ocfs2_mem_dqinfo *oinfo =
|
||||
sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
|
||||
int status = 0;
|
||||
int status = 0, err;
|
||||
int ex = 0;
|
||||
struct super_block *sb = dquot->dq_sb;
|
||||
struct ocfs2_super *osb = OCFS2_SB(sb);
|
||||
int type = dquot->dq_type;
|
||||
struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
|
||||
struct inode *gqinode = info->dqi_gqinode;
|
||||
int need_alloc = ocfs2_global_qinit_alloc(sb, type);
|
||||
handle_t *handle;
|
||||
|
||||
mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
|
||||
/* We need an exclusive lock, because we're going to update use count
|
||||
* and instantiate possibly new dquot structure */
|
||||
status = ocfs2_lock_global_qf(oinfo, 1);
|
||||
mlog_entry("id=%u, type=%d", dquot->dq_id, type);
|
||||
mutex_lock(&dquot->dq_lock);
|
||||
/*
|
||||
* We need an exclusive lock, because we're going to update use count
|
||||
* and instantiate possibly new dquot structure
|
||||
*/
|
||||
status = ocfs2_lock_global_qf(info, 1);
|
||||
if (status < 0)
|
||||
goto out;
|
||||
status = dquot_acquire(dquot);
|
||||
ocfs2_unlock_global_qf(oinfo, 1);
|
||||
if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
|
||||
status = ocfs2_qinfo_lock(info, 0);
|
||||
if (status < 0)
|
||||
goto out_dq;
|
||||
status = qtree_read_dquot(&info->dqi_gi, dquot);
|
||||
ocfs2_qinfo_unlock(info, 0);
|
||||
if (status < 0)
|
||||
goto out_dq;
|
||||
}
|
||||
set_bit(DQ_READ_B, &dquot->dq_flags);
|
||||
|
||||
OCFS2_DQUOT(dquot)->dq_use_count++;
|
||||
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
|
||||
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
|
||||
if (!dquot->dq_off) { /* No real quota entry? */
|
||||
ex = 1;
|
||||
/*
|
||||
* Add blocks to quota file before we start a transaction since
|
||||
* locking allocators ranks above a transaction start
|
||||
*/
|
||||
WARN_ON(journal_current_handle());
|
||||
status = ocfs2_extend_no_holes(gqinode,
|
||||
gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
|
||||
gqinode->i_size);
|
||||
if (status < 0)
|
||||
goto out_dq;
|
||||
}
|
||||
|
||||
handle = ocfs2_start_trans(osb,
|
||||
ocfs2_calc_global_qinit_credits(sb, type));
|
||||
if (IS_ERR(handle)) {
|
||||
status = PTR_ERR(handle);
|
||||
goto out_dq;
|
||||
}
|
||||
status = ocfs2_qinfo_lock(info, ex);
|
||||
if (status < 0)
|
||||
goto out_trans;
|
||||
status = qtree_write_dquot(&info->dqi_gi, dquot);
|
||||
if (ex && info_dirty(sb_dqinfo(sb, type))) {
|
||||
err = __ocfs2_global_write_info(sb, type);
|
||||
if (!status)
|
||||
status = err;
|
||||
}
|
||||
ocfs2_qinfo_unlock(info, ex);
|
||||
out_trans:
|
||||
ocfs2_commit_trans(osb, handle);
|
||||
out_dq:
|
||||
ocfs2_unlock_global_qf(info, 1);
|
||||
if (status < 0)
|
||||
goto out;
|
||||
|
||||
status = ocfs2_create_local_dquot(dquot);
|
||||
if (status < 0)
|
||||
goto out;
|
||||
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
|
||||
out:
|
||||
mutex_unlock(&dquot->dq_lock);
|
||||
mlog_exit(status);
|
||||
return status;
|
||||
}
|
||||
|
@ -768,7 +832,6 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
|
|||
struct ocfs2_super *osb = OCFS2_SB(sb);
|
||||
|
||||
mlog_entry("id=%u, type=%d", dquot->dq_id, type);
|
||||
dquot_mark_dquot_dirty(dquot);
|
||||
|
||||
/* In case user set some limits, sync dquot immediately to global
|
||||
* quota file so that information propagates quicker */
|
||||
|
@ -791,14 +854,16 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
|
|||
mlog_errno(status);
|
||||
goto out_ilock;
|
||||
}
|
||||
mutex_lock(&sb_dqopt(sb)->dqio_mutex);
|
||||
status = ocfs2_sync_dquot(dquot);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out_trans;
|
||||
goto out_dlock;
|
||||
}
|
||||
/* Now write updated local dquot structure */
|
||||
status = dquot_commit(dquot);
|
||||
out_trans:
|
||||
status = ocfs2_local_write_dquot(dquot);
|
||||
out_dlock:
|
||||
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
|
||||
ocfs2_commit_trans(osb, handle);
|
||||
out_ilock:
|
||||
ocfs2_unlock_global_qf(oinfo, 1);
|
||||
|
@ -850,7 +915,7 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
|
|||
}
|
||||
|
||||
const struct dquot_operations ocfs2_quota_operations = {
|
||||
.write_dquot = ocfs2_write_dquot,
|
||||
/* We never make dquot dirty so .write_dquot is never called */
|
||||
.acquire_dquot = ocfs2_acquire_dquot,
|
||||
.release_dquot = ocfs2_release_dquot,
|
||||
.mark_dirty = ocfs2_mark_dquot_dirty,
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "dlmglue.h"
|
||||
#include "quota.h"
|
||||
#include "uptodate.h"
|
||||
#include "super.h"
|
||||
|
||||
/* Number of local quota structures per block */
|
||||
static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
|
||||
|
@ -129,6 +130,39 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read quota block from a given logical offset.
|
||||
*
|
||||
* This function acquires ip_alloc_sem and thus it must not be called with a
|
||||
* transaction started.
|
||||
*/
|
||||
static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
|
||||
struct buffer_head **bh)
|
||||
{
|
||||
int rc = 0;
|
||||
struct buffer_head *tmp = *bh;
|
||||
|
||||
if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
|
||||
ocfs2_error(inode->i_sb,
|
||||
"Quota file %llu is probably corrupted! Requested "
|
||||
"to read block %Lu but file has size only %Lu\n",
|
||||
(unsigned long long)OCFS2_I(inode)->ip_blkno,
|
||||
(unsigned long long)v_block,
|
||||
(unsigned long long)i_size_read(inode));
|
||||
return -EIO;
|
||||
}
|
||||
rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
|
||||
ocfs2_validate_quota_block);
|
||||
if (rc)
|
||||
mlog_errno(rc);
|
||||
|
||||
/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
|
||||
if (!rc && !*bh)
|
||||
*bh = tmp;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Check whether we understand format of quota files */
|
||||
static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
|
||||
{
|
||||
|
@ -671,7 +705,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
|
|||
INIT_LIST_HEAD(&oinfo->dqi_chunk);
|
||||
oinfo->dqi_rec = NULL;
|
||||
oinfo->dqi_lqi_bh = NULL;
|
||||
oinfo->dqi_ibh = NULL;
|
||||
oinfo->dqi_libh = NULL;
|
||||
|
||||
status = ocfs2_global_read_info(sb, type);
|
||||
if (status < 0)
|
||||
|
@ -697,7 +731,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
|
|||
info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
|
||||
oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
|
||||
oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
|
||||
oinfo->dqi_ibh = bh;
|
||||
oinfo->dqi_libh = bh;
|
||||
|
||||
/* We crashed when using local quota file? */
|
||||
if (!(info->dqi_flags & OLQF_CLEAN)) {
|
||||
|
@ -759,7 +793,7 @@ static int ocfs2_local_write_info(struct super_block *sb, int type)
|
|||
{
|
||||
struct mem_dqinfo *info = sb_dqinfo(sb, type);
|
||||
struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv)
|
||||
->dqi_ibh;
|
||||
->dqi_libh;
|
||||
int status;
|
||||
|
||||
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info,
|
||||
|
@ -782,10 +816,6 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
|
|||
int mark_clean = 1, len;
|
||||
int status;
|
||||
|
||||
/* At this point we know there are no more dquots and thus
|
||||
* even if there's some sync in the pdflush queue, it won't
|
||||
* find any dquots and return without doing anything */
|
||||
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
|
||||
iput(oinfo->dqi_gqinode);
|
||||
ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock);
|
||||
ocfs2_lock_res_free(&oinfo->dqi_gqlock);
|
||||
|
@ -820,7 +850,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
|
|||
/* Mark local file as clean */
|
||||
info->dqi_flags |= OLQF_CLEAN;
|
||||
status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
|
||||
oinfo->dqi_ibh,
|
||||
oinfo->dqi_libh,
|
||||
olq_update_info,
|
||||
info);
|
||||
if (status < 0) {
|
||||
|
@ -830,7 +860,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
|
|||
|
||||
out:
|
||||
ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1);
|
||||
brelse(oinfo->dqi_ibh);
|
||||
brelse(oinfo->dqi_libh);
|
||||
brelse(oinfo->dqi_lqi_bh);
|
||||
kfree(oinfo);
|
||||
return 0;
|
||||
|
@ -858,22 +888,21 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
|
|||
}
|
||||
|
||||
/* Write dquot to local quota file */
|
||||
static int ocfs2_local_write_dquot(struct dquot *dquot)
|
||||
int ocfs2_local_write_dquot(struct dquot *dquot)
|
||||
{
|
||||
struct super_block *sb = dquot->dq_sb;
|
||||
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
|
||||
struct buffer_head *bh = NULL;
|
||||
struct buffer_head *bh;
|
||||
struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type];
|
||||
int status;
|
||||
|
||||
status = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type],
|
||||
ol_dqblk_file_block(sb, od->dq_local_off),
|
||||
&bh);
|
||||
status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
|
||||
&bh);
|
||||
if (status) {
|
||||
mlog_errno(status);
|
||||
goto out;
|
||||
}
|
||||
status = ocfs2_modify_bh(sb_dqopt(sb)->files[dquot->dq_type], bh,
|
||||
olq_set_dquot, od);
|
||||
status = ocfs2_modify_bh(lqinode, bh, olq_set_dquot, od);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out;
|
||||
|
@ -973,10 +1002,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
|
|||
}
|
||||
|
||||
/* Initialize chunk header */
|
||||
down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
|
||||
&p_blkno, NULL, NULL);
|
||||
up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out_trans;
|
||||
|
@ -1004,10 +1031,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
|
|||
ocfs2_journal_dirty(handle, bh);
|
||||
|
||||
/* Initialize new block with structures */
|
||||
down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
|
||||
&p_blkno, NULL, NULL);
|
||||
up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out_trans;
|
||||
|
@ -1104,10 +1129,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
|
|||
}
|
||||
|
||||
/* Get buffer from the just added block */
|
||||
down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
|
||||
&p_blkno, NULL, NULL);
|
||||
up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out;
|
||||
|
@ -1188,7 +1211,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
|
|||
}
|
||||
|
||||
/* Create dquot in the local file for given id */
|
||||
static int ocfs2_create_local_dquot(struct dquot *dquot)
|
||||
int ocfs2_create_local_dquot(struct dquot *dquot)
|
||||
{
|
||||
struct super_block *sb = dquot->dq_sb;
|
||||
int type = dquot->dq_type;
|
||||
|
@ -1197,17 +1220,27 @@ static int ocfs2_create_local_dquot(struct dquot *dquot)
|
|||
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
|
||||
int offset;
|
||||
int status;
|
||||
u64 pcount;
|
||||
|
||||
down_write(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
chunk = ocfs2_find_free_entry(sb, type, &offset);
|
||||
if (!chunk) {
|
||||
chunk = ocfs2_extend_local_quota_file(sb, type, &offset);
|
||||
if (IS_ERR(chunk))
|
||||
return PTR_ERR(chunk);
|
||||
if (IS_ERR(chunk)) {
|
||||
status = PTR_ERR(chunk);
|
||||
goto out;
|
||||
}
|
||||
} else if (IS_ERR(chunk)) {
|
||||
return PTR_ERR(chunk);
|
||||
status = PTR_ERR(chunk);
|
||||
goto out;
|
||||
}
|
||||
od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset);
|
||||
od->dq_chunk = chunk;
|
||||
status = ocfs2_extent_map_get_blocks(lqinode,
|
||||
ol_dqblk_block(sb, chunk->qc_num, offset),
|
||||
&od->dq_local_phys_blk,
|
||||
&pcount,
|
||||
NULL);
|
||||
|
||||
/* Initialize dquot structure on disk */
|
||||
status = ocfs2_local_write_dquot(dquot);
|
||||
|
@ -1224,39 +1257,15 @@ static int ocfs2_create_local_dquot(struct dquot *dquot)
|
|||
goto out;
|
||||
}
|
||||
out:
|
||||
up_write(&OCFS2_I(lqinode)->ip_alloc_sem);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Create entry in local file for dquot, load data from the global file */
|
||||
static int ocfs2_local_read_dquot(struct dquot *dquot)
|
||||
{
|
||||
int status;
|
||||
|
||||
mlog_entry("id=%u, type=%d\n", dquot->dq_id, dquot->dq_type);
|
||||
|
||||
status = ocfs2_global_read_dquot(dquot);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Now create entry in the local quota file */
|
||||
status = ocfs2_create_local_dquot(dquot);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out_err;
|
||||
}
|
||||
mlog_exit(0);
|
||||
return 0;
|
||||
out_err:
|
||||
mlog_exit(status);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Release dquot structure from local quota file. ocfs2_release_dquot() has
|
||||
* already started a transaction and obtained exclusive lock for global
|
||||
* quota file. */
|
||||
static int ocfs2_local_release_dquot(struct dquot *dquot)
|
||||
/*
|
||||
* Release dquot structure from local quota file. ocfs2_release_dquot() has
|
||||
* already started a transaction and written all changes to global quota file
|
||||
*/
|
||||
int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
|
||||
{
|
||||
int status;
|
||||
int type = dquot->dq_type;
|
||||
|
@ -1264,15 +1273,6 @@ static int ocfs2_local_release_dquot(struct dquot *dquot)
|
|||
struct super_block *sb = dquot->dq_sb;
|
||||
struct ocfs2_local_disk_chunk *dchunk;
|
||||
int offset;
|
||||
handle_t *handle = journal_current_handle();
|
||||
|
||||
BUG_ON(!handle);
|
||||
/* First write all local changes to global file */
|
||||
status = ocfs2_global_release_dquot(dquot);
|
||||
if (status < 0) {
|
||||
mlog_errno(status);
|
||||
goto out;
|
||||
}
|
||||
|
||||
status = ocfs2_journal_access_dq(handle,
|
||||
INODE_CACHE(sb_dqopt(sb)->files[type]),
|
||||
|
@ -1305,9 +1305,6 @@ static const struct quota_format_ops ocfs2_format_ops = {
|
|||
.read_file_info = ocfs2_local_read_info,
|
||||
.write_file_info = ocfs2_global_write_info,
|
||||
.free_file_info = ocfs2_local_free_info,
|
||||
.read_dqblk = ocfs2_local_read_dquot,
|
||||
.commit_dqblk = ocfs2_local_write_dquot,
|
||||
.release_dqblk = ocfs2_local_release_dquot,
|
||||
};
|
||||
|
||||
struct quota_format_type ocfs2_quota_format = {
|
||||
|
|
|
@ -938,12 +938,16 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
|
|||
int type;
|
||||
struct inode *inode;
|
||||
struct super_block *sb = osb->sb;
|
||||
struct ocfs2_mem_dqinfo *oinfo;
|
||||
|
||||
/* We mostly ignore errors in this function because there's not much
|
||||
* we can do when we see them */
|
||||
for (type = 0; type < MAXQUOTAS; type++) {
|
||||
if (!sb_has_quota_loaded(sb, type))
|
||||
continue;
|
||||
/* Cancel periodic syncing before we grab dqonoff_mutex */
|
||||
oinfo = sb_dqinfo(sb, type)->dqi_priv;
|
||||
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
|
||||
inode = igrab(sb->s_dquot.files[type]);
|
||||
/* Turn off quotas. This will remove all dquot structures from
|
||||
* memory and so they will be automatically synced to global
|
||||
|
|
fs/quota/dquot.c
|
@ -82,7 +82,7 @@
|
|||
|
||||
/*
|
||||
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
|
||||
* and quota formats, dqstats structure containing statistics about the lists
|
||||
* and quota formats.
|
||||
* dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
|
||||
* also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
|
||||
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
|
||||
|
@ -132,7 +132,9 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
|
|||
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
|
||||
EXPORT_SYMBOL(dq_data_lock);
|
||||
|
||||
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
|
||||
static char *quotatypes[] = INITQFNAMES;
|
||||
#endif
|
||||
static struct quota_format_type *quota_formats; /* List of registered formats */
|
||||
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
|
||||
|
||||
|
@ -226,6 +228,10 @@ static struct hlist_head *dquot_hash;
|
|||
|
||||
struct dqstats dqstats;
|
||||
EXPORT_SYMBOL(dqstats);
|
||||
#ifdef CONFIG_SMP
|
||||
struct dqstats *dqstats_pcpu;
|
||||
EXPORT_SYMBOL(dqstats_pcpu);
|
||||
#endif
|
||||
|
||||
static qsize_t inode_get_rsv_space(struct inode *inode);
|
||||
static void __dquot_initialize(struct inode *inode, int type);
|
||||
|
@ -273,7 +279,7 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
|
|||
static inline void put_dquot_last(struct dquot *dquot)
|
||||
{
|
||||
list_add_tail(&dquot->dq_free, &free_dquots);
|
||||
dqstats.free_dquots++;
|
||||
dqstats_inc(DQST_FREE_DQUOTS);
|
||||
}
|
||||
|
||||
static inline void remove_free_dquot(struct dquot *dquot)
|
||||
|
@ -281,7 +287,7 @@ static inline void remove_free_dquot(struct dquot *dquot)
|
|||
if (list_empty(&dquot->dq_free))
|
||||
return;
|
||||
list_del_init(&dquot->dq_free);
|
||||
dqstats.free_dquots--;
|
||||
dqstats_dec(DQST_FREE_DQUOTS);
|
||||
}
|
||||
|
||||
static inline void put_inuse(struct dquot *dquot)
|
||||
|
@ -289,12 +295,12 @@ static inline void put_inuse(struct dquot *dquot)
|
|||
/* We add to the back of inuse list so we don't have to restart
|
||||
* when traversing this list and we block */
|
||||
list_add_tail(&dquot->dq_inuse, &inuse_list);
|
||||
dqstats.allocated_dquots++;
|
||||
dqstats_inc(DQST_ALLOC_DQUOTS);
|
||||
}
|
||||
|
||||
static inline void remove_inuse(struct dquot *dquot)
|
||||
{
|
||||
dqstats.allocated_dquots--;
|
||||
dqstats_dec(DQST_ALLOC_DQUOTS);
|
||||
list_del(&dquot->dq_inuse);
|
||||
}
|
||||
/*
|
||||
|
@ -317,14 +323,23 @@ static inline int mark_dquot_dirty(struct dquot *dquot)
|
|||
return dquot->dq_sb->dq_op->mark_dirty(dquot);
|
||||
}
|
||||
|
||||
/* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
|
||||
int dquot_mark_dquot_dirty(struct dquot *dquot)
|
||||
{
|
||||
int ret = 1;
|
||||
|
||||
/* If quota is dirty already, we don't have to acquire dq_list_lock */
|
||||
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
|
||||
return 1;
|
||||
|
||||
spin_lock(&dq_list_lock);
|
||||
if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
|
||||
if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
|
||||
list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
|
||||
info[dquot->dq_type].dqi_dirty_list);
|
||||
ret = 0;
|
||||
}
|
||||
spin_unlock(&dq_list_lock);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
|
||||
|
||||
|
@ -550,8 +565,8 @@ int dquot_scan_active(struct super_block *sb,
|
|||
continue;
|
||||
/* Now we have active dquot so we can just increase use count */
|
||||
atomic_inc(&dquot->dq_count);
|
||||
dqstats.lookups++;
|
||||
spin_unlock(&dq_list_lock);
|
||||
dqstats_inc(DQST_LOOKUPS);
|
||||
dqput(old_dquot);
|
||||
old_dquot = dquot;
|
||||
ret = fn(dquot, priv);
|
||||
|
@ -596,8 +611,8 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
|
|||
* holding reference so we can safely just increase
|
||||
* use count */
|
||||
atomic_inc(&dquot->dq_count);
|
||||
dqstats.lookups++;
|
||||
spin_unlock(&dq_list_lock);
|
||||
dqstats_inc(DQST_LOOKUPS);
|
||||
sb->dq_op->write_dquot(dquot);
|
||||
dqput(dquot);
|
||||
spin_lock(&dq_list_lock);
|
||||
|
@ -609,9 +624,7 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
|
|||
if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
|
||||
&& info_dirty(&dqopt->info[cnt]))
|
||||
sb->dq_op->write_info(sb, cnt);
|
||||
spin_lock(&dq_list_lock);
|
||||
dqstats.syncs++;
|
||||
spin_unlock(&dq_list_lock);
|
||||
dqstats_inc(DQST_SYNCS);
|
||||
mutex_unlock(&dqopt->dqonoff_mutex);
|
||||
|
||||
if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
|
||||
|
@ -663,6 +676,22 @@ static void prune_dqcache(int count)
|
|||
}
|
||||
}
|
||||
|
||||
static int dqstats_read(unsigned int type)
|
||||
{
|
||||
int count = 0;
|
||||
#ifdef CONFIG_SMP
|
||||
int cpu;
|
||||
for_each_possible_cpu(cpu)
|
||||
count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
|
||||
/* Statistics reading is racy, but absolute accuracy isn't required */
|
||||
if (count < 0)
|
||||
count = 0;
|
||||
#else
|
||||
count = dqstats.stat[type];
|
||||
#endif
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from kswapd when we think we need some
|
||||
* more memory
|
||||
|
@ -675,7 +704,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
|
|||
prune_dqcache(nr);
|
||||
spin_unlock(&dq_list_lock);
|
||||
}
|
||||
return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
|
||||
return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure;
|
||||
}
|
||||
|
||||
static struct shrinker dqcache_shrinker = {
|
||||
|
@ -703,10 +732,7 @@ void dqput(struct dquot *dquot)
|
|||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
spin_lock(&dq_list_lock);
|
||||
dqstats.drops++;
|
||||
spin_unlock(&dq_list_lock);
|
||||
dqstats_inc(DQST_DROPS);
|
||||
we_slept:
|
||||
spin_lock(&dq_list_lock);
|
||||
if (atomic_read(&dquot->dq_count) > 1) {
|
||||
|
@ -823,15 +849,15 @@ we_slept:
|
|||
put_inuse(dquot);
|
||||
/* hash it first so it can be found */
|
||||
insert_dquot_hash(dquot);
|
||||
dqstats.lookups++;
|
||||
spin_unlock(&dq_list_lock);
|
||||
dqstats_inc(DQST_LOOKUPS);
|
||||
} else {
|
||||
if (!atomic_read(&dquot->dq_count))
|
||||
remove_free_dquot(dquot);
|
||||
atomic_inc(&dquot->dq_count);
|
||||
dqstats.cache_hits++;
|
||||
dqstats.lookups++;
|
||||
spin_unlock(&dq_list_lock);
|
||||
dqstats_inc(DQST_CACHE_HITS);
|
||||
dqstats_inc(DQST_LOOKUPS);
|
||||
}
|
||||
/* Wait for dq_lock - after this we know that either dquot_release() is
|
||||
* already finished or it will be canceled due to dq_count > 1 test */
|
||||
|
@ -1677,16 +1703,19 @@ EXPORT_SYMBOL(dquot_free_inode);
|
|||
|
||||
/*
|
||||
 * Transfer the number of inodes and blocks from one diskquota to another.
|
||||
* On success, dquot references in transfer_to are consumed and references
|
||||
* to original dquots that need to be released are placed there. On failure,
|
||||
* references are kept untouched.
|
||||
*
|
||||
* This operation can block, but only after everything is updated
|
||||
* A transaction must be started when entering this function.
|
||||
*
|
||||
*/
|
||||
static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask)
|
||||
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
|
||||
{
|
||||
qsize_t space, cur_space;
|
||||
qsize_t rsv_space = 0;
|
||||
struct dquot *transfer_from[MAXQUOTAS];
|
||||
struct dquot *transfer_to[MAXQUOTAS];
|
||||
struct dquot *transfer_from[MAXQUOTAS] = {};
|
||||
int cnt, ret = 0;
|
||||
char warntype_to[MAXQUOTAS];
|
||||
char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
|
||||
|
@ -1696,19 +1725,12 @@ static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask
|
|||
if (IS_NOQUOTA(inode))
|
||||
return 0;
|
||||
/* Initialize the arrays */
|
||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
||||
transfer_from[cnt] = NULL;
|
||||
transfer_to[cnt] = NULL;
|
||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
|
||||
warntype_to[cnt] = QUOTA_NL_NOWARN;
|
||||
}
|
||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
||||
if (mask & (1 << cnt))
|
||||
transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt);
|
||||
}
|
||||
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
|
||||
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
|
||||
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
|
||||
goto put_all;
|
||||
return 0;
|
||||
}
|
||||
spin_lock(&dq_data_lock);
|
||||
cur_space = inode_get_bytes(inode);
|
||||
|
@ -1760,47 +1782,41 @@ static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask
|
|||
|
||||
mark_all_dquot_dirty(transfer_from);
|
||||
mark_all_dquot_dirty(transfer_to);
|
||||
/* The reference we got is transferred to the inode */
|
||||
/* Pass back references to put */
|
||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
|
||||
transfer_to[cnt] = NULL;
|
||||
warn_put_all:
|
||||
transfer_to[cnt] = transfer_from[cnt];
|
||||
warn:
|
||||
flush_warnings(transfer_to, warntype_to);
|
||||
flush_warnings(transfer_from, warntype_from_inodes);
|
||||
flush_warnings(transfer_from, warntype_from_space);
|
||||
put_all:
|
||||
dqput_all(transfer_from);
|
||||
dqput_all(transfer_to);
|
||||
return ret;
|
||||
over_quota:
|
||||
spin_unlock(&dq_data_lock);
|
||||
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
|
||||
/* Clear dquot pointers we don't want to dqput() */
|
||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
|
||||
transfer_from[cnt] = NULL;
|
||||
goto warn_put_all;
|
||||
goto warn;
|
||||
}
|
||||
EXPORT_SYMBOL(__dquot_transfer);
|
||||
|
||||
/* Wrapper for transferring ownership of an inode for uid/gid only
|
||||
* Called from FSXXX_setattr()
|
||||
*/
|
||||
int dquot_transfer(struct inode *inode, struct iattr *iattr)
|
||||
{
|
||||
qid_t chid[MAXQUOTAS];
|
||||
unsigned long mask = 0;
|
||||
struct dquot *transfer_to[MAXQUOTAS] = {};
|
||||
struct super_block *sb = inode->i_sb;
|
||||
int ret;
|
||||
|
||||
if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) {
|
||||
mask |= 1 << USRQUOTA;
|
||||
chid[USRQUOTA] = iattr->ia_uid;
|
||||
}
|
||||
if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) {
|
||||
mask |= 1 << GRPQUOTA;
|
||||
chid[GRPQUOTA] = iattr->ia_gid;
|
||||
}
|
||||
if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
|
||||
dquot_initialize(inode);
|
||||
return __dquot_transfer(inode, chid, mask);
|
||||
}
|
||||
return 0;
|
||||
if (!sb_any_quota_active(sb) || IS_NOQUOTA(inode))
|
||||
return 0;
|
||||
|
||||
if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
|
||||
transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
|
||||
if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
|
||||
transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_uid, GRPQUOTA);
|
||||
|
||||
ret = __dquot_transfer(inode, transfer_to);
|
||||
dqput_all(transfer_to);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(dquot_transfer);
|
||||
|
||||
|
@ -2275,25 +2291,30 @@ static inline qsize_t stoqb(qsize_t space)
|
|||
}
|
||||
|
||||
/* Generic routine for getting common part of quota structure */
|
||||
static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
|
||||
static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
|
||||
{
|
||||
struct mem_dqblk *dm = &dquot->dq_dqb;
|
||||
|
||||
memset(di, 0, sizeof(*di));
|
||||
di->d_version = FS_DQUOT_VERSION;
|
||||
di->d_flags = dquot->dq_type == USRQUOTA ?
|
||||
XFS_USER_QUOTA : XFS_GROUP_QUOTA;
|
||||
di->d_id = dquot->dq_id;
|
||||
|
||||
spin_lock(&dq_data_lock);
|
||||
di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
|
||||
di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
|
||||
di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
|
||||
di->dqb_ihardlimit = dm->dqb_ihardlimit;
|
||||
di->dqb_isoftlimit = dm->dqb_isoftlimit;
|
||||
di->dqb_curinodes = dm->dqb_curinodes;
|
||||
di->dqb_btime = dm->dqb_btime;
|
||||
di->dqb_itime = dm->dqb_itime;
|
||||
di->dqb_valid = QIF_ALL;
|
||||
di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
|
||||
di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
|
||||
di->d_ino_hardlimit = dm->dqb_ihardlimit;
|
||||
di->d_ino_softlimit = dm->dqb_isoftlimit;
|
||||
di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
|
||||
di->d_icount = dm->dqb_curinodes;
|
||||
di->d_btimer = dm->dqb_btime;
|
||||
di->d_itimer = dm->dqb_itime;
|
||||
spin_unlock(&dq_data_lock);
|
||||
}
|
||||
|
||||
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
|
||||
struct if_dqblk *di)
|
||||
struct fs_disk_quota *di)
|
||||
{
|
||||
struct dquot *dquot;
|
||||
|
||||
|
@ -2307,51 +2328,70 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
|
|||
}
|
||||
EXPORT_SYMBOL(vfs_get_dqblk);
|
||||
|
||||
#define VFS_FS_DQ_MASK \
|
||||
(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
|
||||
FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
|
||||
FS_DQ_BTIMER | FS_DQ_ITIMER)
|
||||
|
||||
/* Generic routine for setting common part of quota structure */
|
||||
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
|
||||
static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
|
||||
{
|
||||
struct mem_dqblk *dm = &dquot->dq_dqb;
|
||||
int check_blim = 0, check_ilim = 0;
|
||||
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
|
||||
|
||||
if ((di->dqb_valid & QIF_BLIMITS &&
|
||||
(di->dqb_bhardlimit > dqi->dqi_maxblimit ||
|
||||
di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
|
||||
(di->dqb_valid & QIF_ILIMITS &&
|
||||
(di->dqb_ihardlimit > dqi->dqi_maxilimit ||
|
||||
di->dqb_isoftlimit > dqi->dqi_maxilimit)))
|
||||
if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
|
||||
return -EINVAL;
|
||||
|
||||
if (((di->d_fieldmask & FS_DQ_BSOFT) &&
|
||||
(di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
|
||||
((di->d_fieldmask & FS_DQ_BHARD) &&
|
||||
(di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
|
||||
((di->d_fieldmask & FS_DQ_ISOFT) &&
|
||||
(di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
|
||||
((di->d_fieldmask & FS_DQ_IHARD) &&
|
||||
(di->d_ino_hardlimit > dqi->dqi_maxilimit)))
|
||||
return -ERANGE;
|
||||
|
||||
spin_lock(&dq_data_lock);
|
||||
if (di->dqb_valid & QIF_SPACE) {
|
||||
dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
|
||||
if (di->d_fieldmask & FS_DQ_BCOUNT) {
|
||||
dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
|
||||
check_blim = 1;
|
||||
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
|
||||
}
|
||||
if (di->dqb_valid & QIF_BLIMITS) {
|
||||
dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
|
||||
dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
|
||||
|
||||
if (di->d_fieldmask & FS_DQ_BSOFT)
|
||||
dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
|
||||
if (di->d_fieldmask & FS_DQ_BHARD)
|
||||
dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
|
||||
if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
|
||||
check_blim = 1;
|
||||
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
|
||||
}
|
||||
if (di->dqb_valid & QIF_INODES) {
|
||||
dm->dqb_curinodes = di->dqb_curinodes;
|
||||
|
||||
if (di->d_fieldmask & FS_DQ_ICOUNT) {
|
||||
dm->dqb_curinodes = di->d_icount;
|
||||
check_ilim = 1;
|
||||
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
|
||||
}
|
||||
if (di->dqb_valid & QIF_ILIMITS) {
|
||||
dm->dqb_isoftlimit = di->dqb_isoftlimit;
|
||||
dm->dqb_ihardlimit = di->dqb_ihardlimit;
|
||||
|
||||
if (di->d_fieldmask & FS_DQ_ISOFT)
|
||||
dm->dqb_isoftlimit = di->d_ino_softlimit;
|
||||
if (di->d_fieldmask & FS_DQ_IHARD)
|
||||
dm->dqb_ihardlimit = di->d_ino_hardlimit;
|
||||
if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
|
||||
check_ilim = 1;
|
||||
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
|
||||
}
|
||||
if (di->dqb_valid & QIF_BTIME) {
|
||||
dm->dqb_btime = di->dqb_btime;
|
||||
|
||||
if (di->d_fieldmask & FS_DQ_BTIMER) {
|
||||
dm->dqb_btime = di->d_btimer;
|
||||
check_blim = 1;
|
||||
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
|
||||
}
|
||||
if (di->dqb_valid & QIF_ITIME) {
|
||||
dm->dqb_itime = di->dqb_itime;
|
||||
|
||||
if (di->d_fieldmask & FS_DQ_ITIMER) {
|
||||
dm->dqb_itime = di->d_itimer;
|
||||
check_ilim = 1;
|
||||
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
|
||||
}
|
||||
|
@ -2361,7 +2401,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
|
|||
dm->dqb_curspace < dm->dqb_bsoftlimit) {
|
||||
dm->dqb_btime = 0;
|
||||
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
|
||||
} else if (!(di->dqb_valid & QIF_BTIME))
|
||||
} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
|
||||
/* Set grace only if user hasn't provided his own... */
|
||||
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
|
||||
}
|
||||
|
@ -2370,7 +2410,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
|
|||
dm->dqb_curinodes < dm->dqb_isoftlimit) {
|
||||
dm->dqb_itime = 0;
|
||||
clear_bit(DQ_INODES_B, &dquot->dq_flags);
|
||||
} else if (!(di->dqb_valid & QIF_ITIME))
|
||||
} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
|
||||
/* Set grace only if user hasn't provided his own... */
|
||||
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
|
||||
}
|
||||
|
@ -2386,7 +2426,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
|
|||
}
|
||||
|
||||
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
|
||||
struct if_dqblk *di)
|
||||
struct fs_disk_quota *di)
|
||||
{
|
||||
struct dquot *dquot;
|
||||
int rc;
|
||||
|
@ -2465,62 +2505,74 @@ const struct quotactl_ops vfs_quotactl_ops = {
|
|||
.set_dqblk = vfs_set_dqblk
|
||||
};
|
||||
|
||||
|
||||
static int do_proc_dqstats(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
/* Update global table */
|
||||
unsigned int type = (int *)table->data - dqstats.stat;
|
||||
dqstats.stat[type] = dqstats_read(type);
|
||||
#endif
|
||||
return proc_dointvec(table, write, buffer, lenp, ppos);
|
||||
}
|
||||
|
||||
static ctl_table fs_dqstats_table[] = {
|
||||
{
|
||||
.procname = "lookups",
|
||||
.data = &dqstats.lookups,
|
||||
.data = &dqstats.stat[DQST_LOOKUPS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "drops",
|
||||
.data = &dqstats.drops,
|
||||
.data = &dqstats.stat[DQST_DROPS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "reads",
|
||||
.data = &dqstats.reads,
|
||||
.data = &dqstats.stat[DQST_READS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "writes",
|
||||
.data = &dqstats.writes,
|
||||
.data = &dqstats.stat[DQST_WRITES],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "cache_hits",
|
||||
.data = &dqstats.cache_hits,
|
||||
.data = &dqstats.stat[DQST_CACHE_HITS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "allocated_dquots",
|
||||
.data = &dqstats.allocated_dquots,
|
||||
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "free_dquots",
|
||||
.data = &dqstats.free_dquots,
|
||||
.data = &dqstats.stat[DQST_FREE_DQUOTS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
{
|
||||
.procname = "syncs",
|
||||
.data = &dqstats.syncs,
|
||||
.data = &dqstats.stat[DQST_SYNCS],
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = do_proc_dqstats,
|
||||
},
|
||||
#ifdef CONFIG_PRINT_QUOTA_WARNING
|
||||
{
|
||||
|
@ -2572,6 +2624,13 @@ static int __init dquot_init(void)
|
|||
if (!dquot_hash)
|
||||
panic("Cannot create dquot hash table");
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
dqstats_pcpu = alloc_percpu(struct dqstats);
|
||||
if (!dqstats_pcpu)
|
||||
panic("Cannot create dquot stats table");
|
||||
#endif
|
||||
memset(&dqstats, 0, sizeof(struct dqstats));
|
||||
|
||||
/* Find power-of-two hlist_heads which can fit into allocation */
|
||||
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
|
||||
dq_hash_bits = 0;
|
||||
|
|
|
@ -113,8 +113,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
|
|||
struct if_dqinfo info;
|
||||
int ret;
|
||||
|
||||
if (!sb_has_quota_active(sb, type))
|
||||
return -ESRCH;
|
||||
if (!sb->s_qcop->get_info)
|
||||
return -ENOSYS;
|
||||
ret = sb->s_qcop->get_info(sb, type, &info);
|
||||
|
@ -129,43 +127,80 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
|
|||
|
||||
if (copy_from_user(&info, addr, sizeof(info)))
|
||||
return -EFAULT;
|
||||
if (!sb_has_quota_active(sb, type))
|
||||
return -ESRCH;
|
||||
if (!sb->s_qcop->set_info)
|
||||
return -ENOSYS;
|
||||
return sb->s_qcop->set_info(sb, type, &info);
|
||||
}
|
||||
|
||||
static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
|
||||
{
|
||||
dst->dqb_bhardlimit = src->d_blk_hardlimit;
|
||||
dst->dqb_bsoftlimit = src->d_blk_softlimit;
|
||||
dst->dqb_curspace = src->d_bcount;
|
||||
dst->dqb_ihardlimit = src->d_ino_hardlimit;
|
||||
dst->dqb_isoftlimit = src->d_ino_softlimit;
|
||||
dst->dqb_curinodes = src->d_icount;
|
||||
dst->dqb_btime = src->d_btimer;
|
||||
dst->dqb_itime = src->d_itimer;
|
||||
dst->dqb_valid = QIF_ALL;
|
||||
}
|
||||
|
||||
static int quota_getquota(struct super_block *sb, int type, qid_t id,
|
||||
void __user *addr)
|
||||
{
|
||||
struct fs_disk_quota fdq;
|
||||
struct if_dqblk idq;
|
||||
int ret;
|
||||
|
||||
if (!sb_has_quota_active(sb, type))
|
||||
return -ESRCH;
|
||||
if (!sb->s_qcop->get_dqblk)
|
||||
return -ENOSYS;
|
||||
ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
|
||||
ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
|
||||
if (ret)
|
||||
return ret;
|
||||
copy_to_if_dqblk(&idq, &fdq);
|
||||
if (copy_to_user(addr, &idq, sizeof(idq)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
|
||||
{
|
||||
dst->d_blk_hardlimit = src->dqb_bhardlimit;
|
||||
dst->d_blk_softlimit = src->dqb_bsoftlimit;
|
||||
dst->d_bcount = src->dqb_curspace;
|
||||
dst->d_ino_hardlimit = src->dqb_ihardlimit;
|
||||
dst->d_ino_softlimit = src->dqb_isoftlimit;
|
||||
dst->d_icount = src->dqb_curinodes;
|
||||
dst->d_btimer = src->dqb_btime;
|
||||
dst->d_itimer = src->dqb_itime;
|
||||
|
||||
dst->d_fieldmask = 0;
|
||||
if (src->dqb_valid & QIF_BLIMITS)
|
||||
dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
|
||||
if (src->dqb_valid & QIF_SPACE)
|
||||
dst->d_fieldmask |= FS_DQ_BCOUNT;
|
||||
if (src->dqb_valid & QIF_ILIMITS)
|
||||
dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
|
||||
if (src->dqb_valid & QIF_INODES)
|
||||
dst->d_fieldmask |= FS_DQ_ICOUNT;
|
||||
if (src->dqb_valid & QIF_BTIME)
|
||||
dst->d_fieldmask |= FS_DQ_BTIMER;
|
||||
if (src->dqb_valid & QIF_ITIME)
|
||||
dst->d_fieldmask |= FS_DQ_ITIMER;
|
||||
}
|
||||
|
||||
static int quota_setquota(struct super_block *sb, int type, qid_t id,
|
||||
void __user *addr)
|
||||
{
|
||||
struct fs_disk_quota fdq;
|
||||
struct if_dqblk idq;
|
||||
|
||||
if (copy_from_user(&idq, addr, sizeof(idq)))
|
||||
return -EFAULT;
|
||||
if (!sb_has_quota_active(sb, type))
|
||||
return -ESRCH;
|
||||
if (!sb->s_qcop->set_dqblk)
|
||||
return -ENOSYS;
|
||||
return sb->s_qcop->set_dqblk(sb, type, id, &idq);
|
||||
copy_from_if_dqblk(&fdq, &idq);
|
||||
return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
|
||||
}
|
||||
|
||||
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
|
||||
|
@ -199,9 +234,9 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
|
|||
|
||||
if (copy_from_user(&fdq, addr, sizeof(fdq)))
|
||||
return -EFAULT;
|
||||
if (!sb->s_qcop->set_xquota)
|
||||
if (!sb->s_qcop->set_dqblk)
|
||||
return -ENOSYS;
|
||||
return sb->s_qcop->set_xquota(sb, type, id, &fdq);
|
||||
return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
|
||||
}
|
||||
|
||||
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
|
||||
|
@ -210,9 +245,9 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
|
|||
struct fs_disk_quota fdq;
|
||||
int ret;
|
||||
|
||||
if (!sb->s_qcop->get_xquota)
|
||||
if (!sb->s_qcop->get_dqblk)
|
||||
return -ENOSYS;
|
||||
ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
|
||||
ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
|
||||
if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
|
||||
return -EFAULT;
|
||||
return ret;
|
||||
|
|
|
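The conversion helpers above let the generic quotactl path keep the old struct if_dqblk user ABI while the filesystem-facing ->get_dqblk/->set_dqblk methods now take struct fs_disk_quota, with d_fieldmask naming the fields the caller actually wants applied. A rough user-space sketch of that masking idea follows; the struct layout and flag values are trimmed stand-ins for illustration, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-ins for the kernel's fs_disk_quota and FS_DQ_* bits. */
#define FS_DQ_BSOFT (1 << 1)
#define FS_DQ_BHARD (1 << 2)

struct fs_disk_quota_sketch {
	uint64_t d_blk_softlimit;
	uint64_t d_blk_hardlimit;
	uint32_t d_fieldmask;	/* which fields the caller wants applied */
};

struct limits {
	uint64_t bsoftlimit;
	uint64_t bhardlimit;
};

/* Apply only the fields named in d_fieldmask, the way do_set_dqblk() now does. */
static void apply_dqblk(struct limits *lim, const struct fs_disk_quota_sketch *di)
{
	if (di->d_fieldmask & FS_DQ_BSOFT)
		lim->bsoftlimit = di->d_blk_softlimit;
	if (di->d_fieldmask & FS_DQ_BHARD)
		lim->bhardlimit = di->d_blk_hardlimit;
}

int main(void)
{
	struct limits lim = { .bsoftlimit = 100, .bhardlimit = 200 };
	/* Only the soft limit is marked valid, so the hard limit stays untouched. */
	struct fs_disk_quota_sketch req = {
		.d_blk_softlimit = 150,
		.d_fieldmask = FS_DQ_BSOFT,
	};

	apply_dqblk(&lim, &req);
	printf("soft=%llu hard=%llu\n",
	       (unsigned long long)lim.bsoftlimit,
	       (unsigned long long)lim.bhardlimit);
	return 0;
}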
@ -60,9 +60,17 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
|
|||
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
|
||||
{
|
||||
struct super_block *sb = info->dqi_sb;
|
||||
ssize_t ret;
|
||||
|
||||
return sb->s_op->quota_write(sb, info->dqi_type, buf,
|
||||
ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
|
||||
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
|
||||
if (ret != info->dqi_usable_bs) {
|
||||
q_warn(KERN_WARNING "VFS: dquota write failed on "
|
||||
"dev %s\n", sb->s_id);
|
||||
if (ret >= 0)
|
||||
ret = -EIO;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Remove empty block from list and return it */
|
||||
|
@ -152,7 +160,7 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
|
|||
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
|
||||
/* No matter whether write succeeds block is out of list */
|
||||
if (write_blk(info, blk, buf) < 0)
|
||||
printk(KERN_ERR
|
||||
q_warn(KERN_ERR
|
||||
"VFS: Can't write block (%u) with free entries.\n",
|
||||
blk);
|
||||
return 0;
|
||||
|
@ -244,7 +252,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
|
|||
if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
|
||||
*err = remove_free_dqentry(info, buf, blk);
|
||||
if (*err < 0) {
|
||||
printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
|
||||
q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
|
||||
"remove block (%u) from entry free list.\n",
|
||||
blk);
|
||||
goto out_buf;
|
||||
|
@ -268,7 +276,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
|
|||
#endif
|
||||
*err = write_blk(info, blk, buf);
|
||||
if (*err < 0) {
|
||||
printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
|
||||
q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
|
||||
"data block %u.\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
|
@ -303,7 +311,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
|
|||
} else {
|
||||
ret = read_blk(info, *treeblk, buf);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't read tree quota block "
|
||||
q_warn(KERN_ERR "VFS: Can't read tree quota block "
|
||||
"%u.\n", *treeblk);
|
||||
goto out_buf;
|
||||
}
|
||||
|
@ -365,7 +373,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
|
|||
if (!dquot->dq_off) {
|
||||
ret = dq_insert_tree(info, dquot);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Error %zd occurred while "
|
||||
q_warn(KERN_ERR "VFS: Error %zd occurred while "
|
||||
"creating quota.\n", ret);
|
||||
kfree(ddquot);
|
||||
return ret;
|
||||
|
@ -377,14 +385,14 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
|
|||
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
|
||||
dquot->dq_off);
|
||||
if (ret != info->dqi_entry_size) {
|
||||
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
|
||||
q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
|
||||
sb->s_id);
|
||||
if (ret >= 0)
|
||||
ret = -ENOSPC;
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
dqstats.writes++;
|
||||
dqstats_inc(DQST_WRITES);
|
||||
kfree(ddquot);
|
||||
|
||||
return ret;
|
||||
|
@ -402,14 +410,14 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
|
|||
if (!buf)
|
||||
return -ENOMEM;
|
||||
if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
|
||||
printk(KERN_ERR "VFS: Quota structure has offset to other "
|
||||
q_warn(KERN_ERR "VFS: Quota structure has offset to other "
|
||||
"block (%u) than it should (%u).\n", blk,
|
||||
(uint)(dquot->dq_off >> info->dqi_blocksize_bits));
|
||||
goto out_buf;
|
||||
}
|
||||
ret = read_blk(info, blk, buf);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
|
||||
q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
dh = (struct qt_disk_dqdbheader *)buf;
|
||||
|
@ -419,7 +427,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
|
|||
if (ret >= 0)
|
||||
ret = put_free_dqblk(info, buf, blk);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't move quota data block (%u) "
|
||||
q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
|
||||
"to free list.\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
|
@ -432,14 +440,14 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
|
|||
/* Insert will write block itself */
|
||||
ret = insert_free_dqentry(info, buf, blk);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't insert quota data "
|
||||
q_warn(KERN_ERR "VFS: Can't insert quota data "
|
||||
"block (%u) to free entry list.\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
} else {
|
||||
ret = write_blk(info, blk, buf);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't write quota data "
|
||||
q_warn(KERN_ERR "VFS: Can't write quota data "
|
||||
"block %u\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
|
@ -464,7 +472,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
|
|||
return -ENOMEM;
|
||||
ret = read_blk(info, *blk, buf);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
|
||||
q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
|
||||
goto out_buf;
|
||||
}
|
||||
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
|
||||
|
@ -488,7 +496,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
|
|||
} else {
|
||||
ret = write_blk(info, *blk, buf);
|
||||
if (ret < 0)
|
||||
printk(KERN_ERR "VFS: Can't write quota tree "
|
||||
q_warn(KERN_ERR "VFS: Can't write quota tree "
|
||||
"block %u.\n", *blk);
|
||||
}
|
||||
}
|
||||
|
@ -521,7 +529,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
|
|||
return -ENOMEM;
|
||||
ret = read_blk(info, blk, buf);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
|
||||
q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
|
||||
|
@ -531,7 +539,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
|
|||
ddquot += info->dqi_entry_size;
|
||||
}
|
||||
if (i == qtree_dqstr_in_blk(info)) {
|
||||
printk(KERN_ERR "VFS: Quota for id %u referenced "
|
||||
q_warn(KERN_ERR "VFS: Quota for id %u referenced "
|
||||
"but not present.\n", dquot->dq_id);
|
||||
ret = -EIO;
|
||||
goto out_buf;
|
||||
|
@ -556,7 +564,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
|
|||
return -ENOMEM;
|
||||
ret = read_blk(info, blk, buf);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
|
||||
q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
|
||||
goto out_buf;
|
||||
}
|
||||
ret = 0;
|
||||
|
@ -599,7 +607,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
|
|||
offset = find_dqentry(info, dquot);
|
||||
if (offset <= 0) { /* Entry not present? */
|
||||
if (offset < 0)
|
||||
printk(KERN_ERR "VFS: Can't read quota "
|
||||
q_warn(KERN_ERR "VFS: Can't read quota "
|
||||
"structure for id %u.\n", dquot->dq_id);
|
||||
dquot->dq_off = 0;
|
||||
set_bit(DQ_FAKE_B, &dquot->dq_flags);
|
||||
|
@ -617,7 +625,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
|
|||
if (ret != info->dqi_entry_size) {
|
||||
if (ret >= 0)
|
||||
ret = -EIO;
|
||||
printk(KERN_ERR "VFS: Error while reading quota "
|
||||
q_warn(KERN_ERR "VFS: Error while reading quota "
|
||||
"structure for id %u.\n", dquot->dq_id);
|
||||
set_bit(DQ_FAKE_B, &dquot->dq_flags);
|
||||
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
|
||||
|
@ -634,7 +642,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
|
|||
spin_unlock(&dq_data_lock);
|
||||
kfree(ddquot);
|
||||
out:
|
||||
dqstats.reads++;
|
||||
dqstats_inc(DQST_READS);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(qtree_read_dquot);
|
||||
|
|
|
@ -22,4 +22,10 @@ struct qt_disk_dqdbheader {
|
|||
|
||||
#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
|
||||
|
||||
#define q_warn(fmt, args...) \
|
||||
do { \
|
||||
if (printk_ratelimit()) \
|
||||
printk(fmt, ## args); \
|
||||
} while(0)
|
||||
|
||||
#endif /* _LINUX_QUOTAIO_TREE_H */
|
||||
|
|
|
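The q_warn() macro introduced above wraps the tree-format quota error messages in printk_ratelimit(), so repeated failures against, say, a corrupted quota file cannot flood the kernel log. A small user-space sketch of the same suppress-when-too-frequent pattern; the one-message-per-second budget and the names here are assumptions for the example, not the kernel's ratelimit state.

#include <stdio.h>
#include <time.h>

/* Allow at most one warning per second; the rest are dropped, mirroring
 * what printk_ratelimit() does for q_warn() in the patch. */
static int warn_ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)
		return 0;	/* budget for this second already used */
	last = now;
	return 1;
}

/* GNU-style variadic macro, matching the "args..." form used by q_warn(). */
#define q_warn_sketch(fmt, ...) \
	do { \
		if (warn_ratelimit()) \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	for (int i = 0; i < 1000; i++)
		q_warn_sketch("VFS: Can't read quota data block %u\n", i);
	return 0;
}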
@ -71,7 +71,7 @@ static int v1_read_dqblk(struct dquot *dquot)
|
|||
dquot->dq_dqb.dqb_ihardlimit == 0 &&
|
||||
dquot->dq_dqb.dqb_isoftlimit == 0)
|
||||
set_bit(DQ_FAKE_B, &dquot->dq_flags);
|
||||
dqstats.reads++;
|
||||
dqstats_inc(DQST_READS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -104,7 +104,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
|
|||
ret = 0;
|
||||
|
||||
out:
|
||||
dqstats.writes++;
|
||||
dqstats_inc(DQST_WRITES);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -63,7 +63,7 @@ static int v2_read_header(struct super_block *sb, int type,
|
|||
size = sb->s_op->quota_read(sb, type, (char *)dqhead,
|
||||
sizeof(struct v2_disk_dqheader), 0);
|
||||
if (size != sizeof(struct v2_disk_dqheader)) {
|
||||
printk(KERN_WARNING "quota_v2: Failed header read:"
|
||||
q_warn(KERN_WARNING "quota_v2: Failed header read:"
|
||||
" expected=%zd got=%zd\n",
|
||||
sizeof(struct v2_disk_dqheader), size);
|
||||
return 0;
|
||||
|
@ -106,7 +106,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
|
|||
size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
|
||||
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
|
||||
if (size != sizeof(struct v2_disk_dqinfo)) {
|
||||
printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
|
||||
q_warn(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
|
||||
sb->s_id);
|
||||
return -1;
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ static int v2_write_file_info(struct super_block *sb, int type)
|
|||
size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
|
||||
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
|
||||
if (size != sizeof(struct v2_disk_dqinfo)) {
|
||||
printk(KERN_WARNING "Can't write info structure on device %s.\n",
|
||||
q_warn(KERN_WARNING "Can't write info structure on device %s.\n",
|
||||
sb->s_id);
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -3076,9 +3076,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
|
||||
|
||||
depth = reiserfs_write_lock_once(inode->i_sb);
|
||||
if (attr->ia_valid & ATTR_SIZE) {
|
||||
if (is_quota_modification(inode, attr))
|
||||
dquot_initialize(inode);
|
||||
|
||||
if (attr->ia_valid & ATTR_SIZE) {
|
||||
/* version 2 items will be caught by the s_maxbytes check
|
||||
** done for us in vmtruncate
|
||||
*/
|
||||
|
|
|
@ -227,7 +227,7 @@ int udf_setattr(struct dentry *dentry, struct iattr *iattr)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (iattr->ia_valid & ATTR_SIZE)
|
||||
if (is_quota_modification(inode, iattr))
|
||||
dquot_initialize(inode);
|
||||
|
||||
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
|
||||
|
|
|
@ -603,7 +603,7 @@ static void ufs_set_inode_ops(struct inode *inode)
|
|||
if (!inode->i_blocks)
|
||||
inode->i_op = &ufs_fast_symlink_inode_operations;
|
||||
else {
|
||||
inode->i_op = &page_symlink_inode_operations;
|
||||
inode->i_op = &ufs_symlink_inode_operations;
|
||||
inode->i_mapping->a_ops = &ufs_aops;
|
||||
}
|
||||
} else
|
||||
|
|
|
@ -148,7 +148,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
|
|||
|
||||
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
|
||||
/* slow symlink */
|
||||
inode->i_op = &page_symlink_inode_operations;
|
||||
inode->i_op = &ufs_symlink_inode_operations;
|
||||
inode->i_mapping->a_ops = &ufs_aops;
|
||||
err = page_symlink(inode, symname, l);
|
||||
if (err)
|
||||
|
|
|
@ -42,4 +42,12 @@ static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|||
const struct inode_operations ufs_fast_symlink_inode_operations = {
|
||||
.readlink = generic_readlink,
|
||||
.follow_link = ufs_follow_link,
|
||||
.setattr = ufs_setattr,
|
||||
};
|
||||
|
||||
const struct inode_operations ufs_symlink_inode_operations = {
|
||||
.readlink = generic_readlink,
|
||||
.follow_link = page_follow_link_light,
|
||||
.put_link = page_put_link,
|
||||
.setattr = ufs_setattr,
|
||||
};
|
||||
|
|
|
@ -508,7 +508,7 @@ out:
|
|||
* - there is no way to know old size
|
||||
 * - there is no way to inform the user about an error, if it happens in `truncate'
|
||||
*/
|
||||
static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
|
||||
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
unsigned int ia_valid = attr->ia_valid;
|
||||
|
@ -518,18 +518,18 @@ static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (is_quota_modification(inode, attr))
|
||||
dquot_initialize(inode);
|
||||
|
||||
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
|
||||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
|
||||
error = dquot_transfer(inode, attr);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
if (ia_valid & ATTR_SIZE &&
|
||||
attr->ia_size != i_size_read(inode)) {
|
||||
if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
|
||||
loff_t old_i_size = inode->i_size;
|
||||
|
||||
dquot_initialize(inode);
|
||||
|
||||
error = vmtruncate(inode, attr->ia_size);
|
||||
if (error)
|
||||
return error;
|
||||
|
|
|
@ -122,9 +122,11 @@ extern void ufs_panic (struct super_block *, const char *, const char *, ...) __
|
|||
|
||||
/* symlink.c */
|
||||
extern const struct inode_operations ufs_fast_symlink_inode_operations;
|
||||
extern const struct inode_operations ufs_symlink_inode_operations;
|
||||
|
||||
/* truncate.c */
|
||||
extern int ufs_truncate (struct inode *, loff_t);
|
||||
extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
|
||||
|
||||
static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
|
||||
{
|
||||
|
|
|
@ -97,7 +97,7 @@ xfs_fs_set_xstate(
|
|||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_get_xquota(
|
||||
xfs_fs_get_dqblk(
|
||||
struct super_block *sb,
|
||||
int type,
|
||||
qid_t id,
|
||||
|
@ -114,7 +114,7 @@ xfs_fs_get_xquota(
|
|||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_set_xquota(
|
||||
xfs_fs_set_dqblk(
|
||||
struct super_block *sb,
|
||||
int type,
|
||||
qid_t id,
|
||||
|
@ -135,6 +135,6 @@ xfs_fs_set_xquota(
|
|||
const struct quotactl_ops xfs_quotactl_operations = {
|
||||
.get_xstate = xfs_fs_get_xstate,
|
||||
.set_xstate = xfs_fs_set_xstate,
|
||||
.get_xquota = xfs_fs_get_xquota,
|
||||
.set_xquota = xfs_fs_set_xquota,
|
||||
.get_dqblk = xfs_fs_get_dqblk,
|
||||
.set_dqblk = xfs_fs_set_dqblk,
|
||||
};
|
||||
|
|
|
@ -448,6 +448,9 @@ xfs_qm_scall_getqstat(
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define XFS_DQ_MASK \
|
||||
(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
|
||||
|
||||
/*
|
||||
* Adjust quota limits, and start/stop timers accordingly.
|
||||
*/
|
||||
|
@ -465,9 +468,10 @@ xfs_qm_scall_setqlim(
|
|||
int error;
|
||||
xfs_qcnt_t hard, soft;
|
||||
|
||||
if ((newlim->d_fieldmask &
|
||||
(FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
|
||||
return (0);
|
||||
if (newlim->d_fieldmask & ~XFS_DQ_MASK)
|
||||
return EINVAL;
|
||||
if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
|
||||
return 0;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
|
||||
if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
|
||||
|
|
|
@ -109,6 +109,15 @@ typedef struct fs_disk_quota {
|
|||
#define FS_DQ_RTBWARNS (1<<11)
|
||||
#define FS_DQ_WARNS_MASK (FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS)
|
||||
|
||||
/*
|
||||
 * Accounting values. These can only be set for filesystems with
|
||||
* non-transactional quotas that require quotacheck(8) in userspace.
|
||||
*/
|
||||
#define FS_DQ_BCOUNT (1<<12)
|
||||
#define FS_DQ_ICOUNT (1<<13)
|
||||
#define FS_DQ_RTBCOUNT (1<<14)
|
||||
#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
|
||||
|
||||
/*
|
||||
* Various flags related to quotactl(2). Only relevant to XFS filesystems.
|
||||
*/
|
||||
|
|
|
@ -106,6 +106,15 @@ struct ext2_sb_info {
|
|||
spinlock_t s_rsv_window_lock;
|
||||
struct rb_root s_rsv_window_root;
|
||||
struct ext2_reserve_window_node s_rsv_window_head;
|
||||
/*
|
||||
* s_lock protects against concurrent modifications of s_mount_state,
|
||||
* s_blocks_last, s_overhead_last and the content of superblock's
|
||||
* buffer pointed to by sbi->s_es.
|
||||
*
|
||||
* Note: It is used in ext2_show_options() to provide a consistent view
|
||||
* of the mount options.
|
||||
*/
|
||||
spinlock_t s_lock;
|
||||
};
|
||||
|
||||
static inline spinlock_t *
|
||||
|
|
|
@ -427,9 +427,9 @@ struct transaction_s
|
|||
enum {
|
||||
T_RUNNING,
|
||||
T_LOCKED,
|
||||
T_RUNDOWN,
|
||||
T_FLUSH,
|
||||
T_COMMIT,
|
||||
T_COMMIT_RECORD,
|
||||
T_FINISHED
|
||||
} t_state;
|
||||
|
||||
|
@ -991,6 +991,7 @@ int journal_start_commit(journal_t *journal, tid_t *tid);
|
|||
int journal_force_commit_nested(journal_t *journal);
|
||||
int log_wait_commit(journal_t *journal, tid_t tid);
|
||||
int log_do_checkpoint(journal_t *journal);
|
||||
int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
|
||||
|
||||
void __log_wait_for_space(journal_t *journal);
|
||||
extern void __journal_drop_transaction(journal_t *, transaction_t *);
|
||||
|
|
|
@ -174,6 +174,8 @@ enum {
|
|||
#include <linux/rwsem.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <linux/dqblk_xfs.h>
|
||||
#include <linux/dqblk_v1.h>
|
||||
|
@ -238,19 +240,43 @@ static inline int info_dirty(struct mem_dqinfo *info)
|
|||
return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
|
||||
}
|
||||
|
||||
struct dqstats {
|
||||
int lookups;
|
||||
int drops;
|
||||
int reads;
|
||||
int writes;
|
||||
int cache_hits;
|
||||
int allocated_dquots;
|
||||
int free_dquots;
|
||||
int syncs;
|
||||
enum {
|
||||
DQST_LOOKUPS,
|
||||
DQST_DROPS,
|
||||
DQST_READS,
|
||||
DQST_WRITES,
|
||||
DQST_CACHE_HITS,
|
||||
DQST_ALLOC_DQUOTS,
|
||||
DQST_FREE_DQUOTS,
|
||||
DQST_SYNCS,
|
||||
_DQST_DQSTAT_LAST
|
||||
};
|
||||
|
||||
struct dqstats {
|
||||
int stat[_DQST_DQSTAT_LAST];
|
||||
};
|
||||
|
||||
extern struct dqstats *dqstats_pcpu;
|
||||
extern struct dqstats dqstats;
|
||||
|
||||
static inline void dqstats_inc(unsigned int type)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
|
||||
#else
|
||||
dqstats.stat[type]++;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void dqstats_dec(unsigned int type)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
|
||||
#else
|
||||
dqstats.stat[type]--;
|
||||
#endif
|
||||
}
|
||||
|
||||
#define DQ_MOD_B 0 /* dquot modified since read */
|
||||
#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */
|
||||
#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */
|
||||
|
@ -311,12 +337,10 @@ struct quotactl_ops {
|
|||
int (*quota_sync)(struct super_block *, int, int);
|
||||
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
|
||||
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
|
||||
int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
|
||||
int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
|
||||
int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
|
||||
int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
|
||||
int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
|
||||
int (*set_xstate)(struct super_block *, unsigned int, int);
|
||||
int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
|
||||
int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
|
||||
};
|
||||
|
||||
struct quota_format_type {
|
||||
|
|
|
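The dqstats rework above replaces counter updates done under dq_list_lock with per-CPU increments (dqstats_inc/dqstats_dec) that are only summed when the statistics are actually read, as dqstats_read() does in dquot.c. A condensed user-space model of that read-side summation; the fixed four-CPU array stands in for the kernel's alloc_percpu() allocation and is only illustrative.

#include <stdio.h>

enum { DQST_LOOKUPS, DQST_DROPS, _DQST_DQSTAT_LAST };

#define NR_CPUS 4	/* assumed CPU count for the sketch */

/* One counter block per CPU; each CPU only ever touches its own slot,
 * so increments need no shared lock. */
static int pcpu_stat[NR_CPUS][_DQST_DQSTAT_LAST];

static void dqstats_inc_on(int cpu, int type)
{
	pcpu_stat[cpu][type]++;
}

/* Reading sums all CPUs; the result may be slightly stale, which the
 * patch accepts ("absolute accuracy isn't required"). */
static int dqstats_read(int type)
{
	int cpu, count = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		count += pcpu_stat[cpu][type];
	return count < 0 ? 0 : count;
}

int main(void)
{
	dqstats_inc_on(0, DQST_LOOKUPS);
	dqstats_inc_on(2, DQST_LOOKUPS);
	dqstats_inc_on(3, DQST_DROPS);
	printf("lookups=%d drops=%d\n",
	       dqstats_read(DQST_LOOKUPS), dqstats_read(DQST_DROPS));
	return 0;
}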
@ -14,6 +14,14 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
|
|||
return &sb->s_dquot;
|
||||
}
|
||||
|
||||
/* i_mutex must be held */
|
||||
static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
|
||||
{
|
||||
return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
|
||||
(ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
|
||||
(ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_QUOTA)
|
||||
|
||||
/*
|
||||
|
@ -63,9 +71,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags);
|
|||
int vfs_quota_sync(struct super_block *sb, int type, int wait);
|
||||
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
|
||||
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
|
||||
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
|
||||
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
|
||||
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
|
||||
struct fs_disk_quota *di);
|
||||
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
|
||||
struct fs_disk_quota *di);
|
||||
|
||||
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
|
||||
int dquot_transfer(struct inode *inode, struct iattr *iattr);
|
||||
int vfs_dq_quota_on_remount(struct super_block *sb);
|
||||
|
||||
|
|