Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6: (24 commits)
  quota: Fix possible oops in __dquot_initialize()
  ext3: Update kernel-doc comments
  jbd/2: fixed typos
  ext2: fixed typo.
  ext3: Fix debug messages in ext3_group_extend()
  jbd: Convert atomic_inc() to get_bh()
  ext3: Remove misplaced BUFFER_TRACE() in ext3_truncate()
  jbd: Fix debug message in do_get_write_access()
  jbd: Check return value of __getblk()
  ext3: Use DIV_ROUND_UP() on group desc block counting
  ext3: Return proper error code on ext3_fill_super()
  ext3: Remove unnecessary casts on bh->b_data
  ext3: Cleanup ext3_setup_super()
  quota: Fix issuing of warnings from dquot_transfer
  quota: fix dquot_disable vs dquot_transfer race v2
  jbd: Convert bitops to buffer fns
  ext3/jbd: Avoid WARN() messages when failing to write the superblock
  jbd: Use offset_in_page() instead of manual calculation
  jbd: Remove unnecessary goto statement
  jbd: Use printk_ratelimited() in journal_alloc_journal_head()
  ...
commit 7d2f280e75
@@ -646,10 +646,9 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
 return here;
 }
 
-/*
+/**
 * ext2_try_to_allocate()
 * @sb: superblock
-* @handle: handle to this transaction
 * @group: given allocation block group
 * @bitmap_bh: bufferhead holds the block bitmap
 * @grp_goal: given target block within the group
@@ -792,9 +792,9 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
 if (here < 0)
 here = 0;
 
-p = ((char *)bh->b_data) + (here >> 3);
+p = bh->b_data + (here >> 3);
 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
-next = (r - ((char *)bh->b_data)) << 3;
+next = (r - bh->b_data) << 3;
 
 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
 return next;
@@ -810,8 +810,9 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
 
 /**
 * claim_block()
+* @lock: the spin lock for this block group
 * @block: the free block (group relative) to allocate
-* @bh: the bufferhead containts the block group bitmap
+* @bh: the buffer_head contains the block group bitmap
 *
 * We think we can allocate this block in this bitmap. Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
@@ -956,9 +957,11 @@ fail_access:
 * but we will shift to the place where start_block is,
 * then start from there, when looking for a reservable space.
 *
-* @size: the target new reservation window size
+* @my_rsv: the reservation window
 *
-* @group_first_block: the first block we consider to start
+* @sb: the super block
+*
+* @start_block: the first block we consider to start
 * the real search from
 *
 * @last_block:
@@ -1084,7 +1087,7 @@ static int find_next_reservable_window(
 *
 * failed: we failed to find a reservation window in this group
 *
-* @rsv: the reservation
+* @my_rsv: the reservation window
 *
 * @grp_goal: The goal (group-relative). It is where the search for a
 * free reservable space should start from.
@@ -1273,8 +1276,8 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
 * @group: given allocation block group
 * @bitmap_bh: bufferhead holds the block bitmap
 * @grp_goal: given target block within the group
-* @count: target number of blocks to allocate
 * @my_rsv: reservation window
+* @count: target number of blocks to allocate
 * @errp: pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
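The balloc.c hunks above are pure kernel-doc fixes: parameter lists are corrected and reordered to match the real function signatures, and "name:" titles become "name -". As a reminder of the format these comments are being aligned to, here is a minimal sketch of a kernel-doc block on a hypothetical helper (the function and parameter names are illustrative, not from the patch):

/**
 * demo_count_free - count zero bits in a bitmap buffer
 * @map:  bitmap to scan
 * @size: number of bits in @map
 *
 * A kernel-doc block opens with a double-star comment, names the
 * function as "name - description", and lists each parameter as
 * "@param: description" in declaration order.
 */
static int demo_count_free(const unsigned char *map, int size)
{
	int i, free = 0;

	for (i = 0; i < size; i++)
		if (!(map[i >> 3] & (1 << (i & 7))))
			free++;
	return free;
}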
@@ -570,9 +570,14 @@ got:
 ei->i_state_flags = 0;
 ext3_set_inode_state(inode, EXT3_STATE_NEW);
 
-ei->i_extra_isize =
-(EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
+/* See comment in ext3_iget for explanation */
+if (ino >= EXT3_FIRST_INO(sb) + 1 &&
+EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
+ei->i_extra_isize =
+sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
+} else {
+ei->i_extra_isize = 0;
+}
 
 ret = inode;
 dquot_initialize(inode);
@@ -498,7 +498,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 }
 
 /**
-* ext3_blks_to_allocate: Look up the block map and count the number
+* ext3_blks_to_allocate - Look up the block map and count the number
 * of direct blocks need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
@@ -536,14 +536,18 @@ static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 }
 
 /**
-* ext3_alloc_blocks: multiple allocate blocks needed for a branch
+* ext3_alloc_blocks - multiple allocate blocks needed for a branch
+* @handle: handle for this transaction
+* @inode: owner
+* @goal: preferred place for allocation
 * @indirect_blks: the number of blocks need to allocate for indirect
 * blocks
-*
+* @blks: number of blocks need to allocated for direct blocks
 * @new_blocks: on return it will store the new block numbers for
 * the indirect blocks(if needed) and the first direct block,
-* @blks: on return it will store the total number of allocated
-* direct blocks
+* @err: here we store the error value
+*
+* return the number of direct blocks allocated
 */
 static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 ext3_fsblk_t goal, int indirect_blks, int blks,
@@ -598,9 +602,11 @@ failed_out:
 
 /**
 * ext3_alloc_branch - allocate and set up a chain of blocks.
+* @handle: handle for this transaction
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
+* @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
@@ -700,10 +706,9 @@ failed:
 
 /**
 * ext3_splice_branch - splice the allocated branch onto inode.
+* @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
-* @chain: chain of indirect blocks (with a missing link - see
-* ext3_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
@@ -2530,7 +2535,6 @@ void ext3_truncate(struct inode *inode)
 */
 } else {
 /* Shared branch grows from an indirect block */
-BUFFER_TRACE(partial->bh, "get_write_access");
 ext3_free_branches(handle, inode, partial->bh,
 partial->p,
 partial->p+1, (chain+n-1) - partial);
@@ -977,7 +977,8 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
 o_blocks_count = le32_to_cpu(es->s_blocks_count);
 
 if (test_opt(sb, DEBUG))
-printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" uto "E3FSBLK" blocks\n",
+printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
+" upto "E3FSBLK" blocks\n",
 o_blocks_count, n_blocks_count);
 
 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -985,7 +986,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
 
 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
 printk(KERN_ERR "EXT3-fs: filesystem on %s:"
-" too large to resize to %lu blocks safely\n",
+" too large to resize to "E3FSBLK" blocks safely\n",
 sb->s_id, n_blocks_count);
 if (sizeof(sector_t) < 8)
 ext3_warning(sb, __func__,
@@ -1065,11 +1066,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
 es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
-o_blocks_count + add);
+ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
+o_blocks_count, o_blocks_count + add);
 ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
-ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count,
-o_blocks_count + add);
+ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
+o_blocks_count, o_blocks_count + add);
 if ((err = ext3_journal_stop(handle)))
 goto exit_put;
 if (test_opt(sb, DEBUG))
@@ -1301,9 +1301,9 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
 ext3_msg(sb, KERN_WARNING,
 "warning: mounting fs with errors, "
 "running e2fsck is recommended");
-else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
 le16_to_cpu(es->s_mnt_count) >=
-(unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
+le16_to_cpu(es->s_max_mnt_count))
 ext3_msg(sb, KERN_WARNING,
 "warning: maximal mount count reached, "
 "running e2fsck is recommended");
@@ -1320,7 +1320,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
 valid forever! :) */
 es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
 #endif
-if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
+if (!le16_to_cpu(es->s_max_mnt_count))
 es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
 le16_add_cpu(&es->s_mnt_count, 1);
 es->s_mtime = cpu_to_le32(get_seconds());
@@ -1647,7 +1647,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 * Note: s_es must be initialized as soon as possible because
 * some ext3 macro-instructions depend on its value
 */
-es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+es = (struct ext3_super_block *) (bh->b_data + offset);
 sbi->s_es = es;
 sb->s_magic = le16_to_cpu(es->s_magic);
 if (sb->s_magic != EXT3_SUPER_MAGIC)
@@ -1758,7 +1758,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 "error: can't read superblock on 2nd try");
 goto failed_mount;
 }
-es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
+es = (struct ext3_super_block *)(bh->b_data + offset);
 sbi->s_es = es;
 if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
 ext3_msg(sb, KERN_ERR,
@@ -1857,13 +1857,13 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 le32_to_cpu(es->s_first_data_block) - 1)
 / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
-db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
-EXT3_DESC_PER_BLOCK(sb);
+db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
 sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
 GFP_KERNEL);
 if (sbi->s_group_desc == NULL) {
 ext3_msg(sb, KERN_ERR,
 "error: not enough memory");
+ret = -ENOMEM;
 goto failed_mount;
 }
 
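The DIV_ROUND_UP() conversion above is purely cosmetic: the macro expands to the same round-up division that was previously open-coded. A small userspace sketch (using the macro's definition from include/linux/kernel.h; the sample numbers are arbitrary) shows the equivalence:

#include <assert.h>
#include <stdio.h>

/* Same definition as include/linux/kernel.h */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long groups = 8193;	/* stand-in for sbi->s_groups_count */
	unsigned long per_blk = 128;	/* stand-in for EXT3_DESC_PER_BLOCK(sb) */

	/* open-coded form removed by the patch */
	unsigned long before = (groups + per_blk - 1) / per_blk;
	/* macro form introduced by the patch */
	unsigned long after = DIV_ROUND_UP(groups, per_blk);

	assert(before == after);
	printf("%lu descriptor blocks\n", after);
	return 0;
}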
@@ -1951,6 +1951,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 }
 if (err) {
 ext3_msg(sb, KERN_ERR, "error: insufficient memory");
+ret = err;
 goto failed_mount3;
 }
 
@@ -2159,7 +2160,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
 goto out_bdev;
 }
 
-es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+es = (struct ext3_super_block *) (bh->b_data + offset);
 if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
 !(le32_to_cpu(es->s_feature_incompat) &
 EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -2352,6 +2353,21 @@ static int ext3_commit_super(struct super_block *sb,
 
 if (!sbh)
 return error;
+
+if (buffer_write_io_error(sbh)) {
+/*
+* Oh, dear. A previous attempt to write the
+* superblock failed. This could happen because the
+* USB device was yanked out. Or it could happen to
+* be a transient write error and maybe the block will
+* be remapped. Nothing we can do but to retry the
+* write and hope for the best.
+*/
+ext3_msg(sb, KERN_ERR, "previous I/O error to "
+"superblock detected");
+clear_buffer_write_io_error(sbh);
+set_buffer_uptodate(sbh);
+}
 /*
 * If the file system is mounted read-only, don't update the
 * superblock write time. This avoids updating the superblock
@@ -2368,8 +2384,15 @@ static int ext3_commit_super(struct super_block *sb,
 es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
 BUFFER_TRACE(sbh, "marking dirty");
 mark_buffer_dirty(sbh);
-if (sync)
+if (sync) {
 error = sync_dirty_buffer(sbh);
+if (buffer_write_io_error(sbh)) {
+ext3_msg(sb, KERN_ERR, "I/O error while writing "
+"superblock");
+clear_buffer_write_io_error(sbh);
+set_buffer_uptodate(sbh);
+}
+}
 return error;
 }
 
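The two ext3_commit_super() hunks carry the "ext3/jbd: Avoid WARN() messages when failing to write the superblock" change: the sticky write-error flag on the superblock's buffer_head is reported, cleared, and the buffer is marked uptodate again so the next write can be retried instead of tripping a warning. A rough userspace analogue of that pattern, with hypothetical names (not the kernel API), looks like this:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a buffer_head with a sticky error bit. */
struct demo_buffer {
	bool write_io_error;	/* set by a failed write, stays set */
	bool uptodate;
};

/* Pretend low-level write; fails only on the first attempt here. */
static int demo_submit_write(struct demo_buffer *b)
{
	static int calls;

	if (calls++ == 0) {
		b->write_io_error = true;
		b->uptodate = false;
		return -5;	/* -EIO */
	}
	return 0;
}

static int demo_commit(struct demo_buffer *b)
{
	if (b->write_io_error) {
		/* report it, then clear the flag so the retry is not poisoned */
		fprintf(stderr, "previous I/O error detected, retrying\n");
		b->write_io_error = false;
		b->uptodate = true;
	}
	return demo_submit_write(b);
}

int main(void)
{
	struct demo_buffer b = { 0 };

	demo_commit(&b);		/* first write fails and latches the error */
	return demo_commit(&b) ? 1 : 0;	/* retry succeeds */
}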
@@ -221,7 +221,7 @@ restart:
 goto restart;
 }
 if (buffer_locked(bh)) {
-atomic_inc(&bh->b_count);
+get_bh(bh);
 spin_unlock(&journal->j_list_lock);
 jbd_unlock_bh_state(bh);
 wait_on_buffer(bh);
@@ -283,7 +283,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
 int ret = 0;
 
 if (buffer_locked(bh)) {
-atomic_inc(&bh->b_count);
+get_bh(bh);
 spin_unlock(&journal->j_list_lock);
 jbd_unlock_bh_state(bh);
 wait_on_buffer(bh);
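These checkpoint.c hunks (and the commit.c ones below) are the "jbd: Convert atomic_inc() to get_bh()" cleanup: the open-coded increment of the buffer reference count is replaced by the named buffer_head helper, which performs the same operation but documents the intent. To the best of my recollection the helper is defined in include/linux/buffer_head.h roughly as follows (shown here as a sketch, not compilable outside the kernel tree):

/* Paraphrase of the helper; bh->b_count is an atomic_t reference
 * count, so this is the same operation the old open-coded
 * atomic_inc() performed, just behind a descriptive name. */
static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}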
@@ -587,13 +587,13 @@ void journal_commit_transaction(journal_t *journal)
 /* Bump b_count to prevent truncate from stumbling over
 the shadowed buffer! @@@ This can go if we ever get
 rid of the BJ_IO/BJ_Shadow pairing of buffers. */
-atomic_inc(&jh2bh(jh)->b_count);
+get_bh(jh2bh(jh));
 
 /* Make a temporary IO buffer with which to write it out
 (this will requeue both the metadata buffer and the
 temporary IO buffer). new_bh goes on BJ_IO*/
 
-set_bit(BH_JWrite, &jh2bh(jh)->b_state);
+set_buffer_jwrite(jh2bh(jh));
 /*
 * akpm: journal_write_metadata_buffer() sets
 * new_bh->b_transaction to commit_transaction.
@@ -603,7 +603,7 @@ void journal_commit_transaction(journal_t *journal)
 JBUFFER_TRACE(jh, "ph3: write metadata");
 flags = journal_write_metadata_buffer(commit_transaction,
 jh, &new_jh, blocknr);
-set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
+set_buffer_jwrite(jh2bh(new_jh));
 wbuf[bufs++] = jh2bh(new_jh);
 
 /* Record the new block's tag in the current descriptor
@@ -713,7 +713,7 @@ wait_for_iobuf:
 shadowed buffer */
 jh = commit_transaction->t_shadow_list->b_tprev;
 bh = jh2bh(jh);
-clear_bit(BH_JWrite, &bh->b_state);
+clear_buffer_jwrite(bh);
 J_ASSERT_BH(bh, buffer_jbddirty(bh));
 
 /* The metadata is now released for reuse, but we need
@@ -36,6 +36,7 @@
 #include <linux/poison.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
+#include <linux/ratelimit.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -84,6 +85,7 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
+static const char *journal_dev_name(journal_t *journal, char *buffer);
 
 /*
 * Helper function used to manage commit timeouts
@@ -439,7 +441,7 @@ int __log_start_commit(journal_t *journal, tid_t target)
 */
 if (!tid_geq(journal->j_commit_request, target)) {
 /*
-* We want a new commit: OK, mark the request and wakup the
+* We want a new commit: OK, mark the request and wakeup the
 * commit thread. We do _not_ do the commit ourselves.
 */
 
@@ -950,6 +952,8 @@ int journal_create(journal_t *journal)
 if (err)
 return err;
 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+if (unlikely(!bh))
+return -ENOMEM;
 lock_buffer(bh);
 memset (bh->b_data, 0, journal->j_blocksize);
 BUFFER_TRACE(bh, "marking dirty");
@@ -1010,6 +1014,23 @@ void journal_update_superblock(journal_t *journal, int wait)
 goto out;
 }
 
+if (buffer_write_io_error(bh)) {
+char b[BDEVNAME_SIZE];
+/*
+* Oh, dear. A previous attempt to write the journal
+* superblock failed. This could happen because the
+* USB device was yanked out. Or it could happen to
+* be a transient write error and maybe the block will
+* be remapped. Nothing we can do but to retry the
+* write and hope for the best.
+*/
+printk(KERN_ERR "JBD: previous I/O error detected "
+"for journal superblock update for %s.\n",
+journal_dev_name(journal, b));
+clear_buffer_write_io_error(bh);
+set_buffer_uptodate(bh);
+}
+
 spin_lock(&journal->j_state_lock);
 jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
 journal->j_tail, journal->j_tail_sequence, journal->j_errno);
@@ -1021,9 +1042,17 @@ void journal_update_superblock(journal_t *journal, int wait)
 
 BUFFER_TRACE(bh, "marking dirty");
 mark_buffer_dirty(bh);
-if (wait)
+if (wait) {
 sync_dirty_buffer(bh);
-else
+if (buffer_write_io_error(bh)) {
+char b[BDEVNAME_SIZE];
+printk(KERN_ERR "JBD: I/O error detected "
+"when updating journal superblock for %s.\n",
+journal_dev_name(journal, b));
+clear_buffer_write_io_error(bh);
+set_buffer_uptodate(bh);
+}
+} else
 write_dirty_buffer(bh, WRITE);
 
 out:
@@ -1719,7 +1748,6 @@ static void journal_destroy_journal_head_cache(void)
 static struct journal_head *journal_alloc_journal_head(void)
 {
 struct journal_head *ret;
-static unsigned long last_warning;
 
 #ifdef CONFIG_JBD_DEBUG
 atomic_inc(&nr_journal_heads);
@@ -1727,11 +1755,9 @@ static struct journal_head *journal_alloc_journal_head(void)
 ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
 if (ret == NULL) {
 jbd_debug(1, "out of memory for journal_head\n");
-if (time_after(jiffies, last_warning + 5*HZ)) {
-printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-__func__);
-last_warning = jiffies;
-}
+printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
+__func__);
+
 while (ret == NULL) {
 yield();
 ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
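The open-coded throttling (a static last_warning timestamp plus time_after()) is replaced above by printk_ratelimited(), which wraps printk() with a per-call-site rate limit. A minimal userspace sketch of the same idea, with hypothetical names and a plain time()-based limiter instead of the kernel's ratelimit_state, is:

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for printk_ratelimited(): allow at most one
 * message per 5-second window for this call site. */
#define demo_log_ratelimited(fmt, ...)					\
	do {								\
		static time_t last;					\
		time_t now = time(NULL);				\
		if (now - last >= 5) {					\
			last = now;					\
			fprintf(stderr, fmt, ##__VA_ARGS__);		\
		}							\
	} while (0)

int main(void)
{
	for (int i = 0; i < 1000000; i++)
		demo_log_ratelimited("out of memory in %s, retrying\n",
				     __func__);
	return 0;
}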
@@ -296,10 +296,10 @@ int journal_skip_recovery(journal_t *journal)
 #ifdef CONFIG_JBD_DEBUG
 int dropped = info.end_transaction -
 be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
 jbd_debug(1,
 "JBD: ignoring %d transaction%s from the journal.\n",
 dropped, (dropped == 1) ? "" : "s");
+#endif
 journal->j_transaction_sequence = ++info.end_transaction;
 }
 
@@ -293,9 +293,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 jbd_free_handle(handle);
 current->journal_info = NULL;
 handle = ERR_PTR(err);
-goto out;
 }
-out:
 return handle;
 }
 
@@ -528,7 +526,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 transaction = handle->h_transaction;
 journal = transaction->t_journal;
 
-jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
 JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -713,7 +711,7 @@ done:
 J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
 "Possible IO failure.\n");
 page = jh2bh(jh)->b_page;
-offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+offset = offset_in_page(jh2bh(jh)->b_data);
 source = kmap_atomic(page, KM_USER0);
 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
 kunmap_atomic(source, KM_USER0);
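offset_in_page() is simply the masking operation that was open-coded here; to the best of my knowledge it is defined in include/linux/mm.h as ((unsigned long)(p) & ~PAGE_MASK). A runnable userspace sketch of the same arithmetic, using sysconf() for the page size, shows what the helper computes:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long page_mask = ~(page_size - 1);	/* like PAGE_MASK */
	char buf[8192];
	char *p = buf + 300;	/* arbitrary pointer into a buffer */

	/* the open-coded form the patch removes ... */
	unsigned long off1 = (unsigned long)p & ~page_mask;
	/* ... which equals the offset of p within its page */
	unsigned long off2 = (unsigned long)p % page_size;

	printf("offset in page: %lu (check: %lu)\n", off1, off2);
	return off1 == off2 ? 0 : 1;
}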
@@ -478,7 +478,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 */
 if (!tid_geq(journal->j_commit_request, target)) {
 /*
-* We want a new commit: OK, mark the request and wakup the
+* We want a new commit: OK, mark the request and wakeup the
 * commit thread. We do _not_ do the commit ourselves.
 */
 
@@ -4,6 +4,7 @@
 
 config QUOTA
 bool "Quota support"
+select QUOTACTL
 help
 If you say Y here, you will be able to set per user limits for disk
 usage (also called disk quotas). Currently, it works for the
@@ -65,8 +66,7 @@ config QFMT_V2
 
 config QUOTACTL
 bool
-depends on XFS_QUOTA || QUOTA
-default y
+default n
 
 config QUOTACTL_COMPAT
 bool
@@ -1386,6 +1386,9 @@ static void __dquot_initialize(struct inode *inode, int type)
 /* Avoid races with quotaoff() */
 if (!sb_has_quota_active(sb, cnt))
 continue;
+/* We could race with quotaon or dqget() could have failed */
+if (!got[cnt])
+continue;
 if (!inode->i_dquot[cnt]) {
 inode->i_dquot[cnt] = got[cnt];
 got[cnt] = NULL;
@@ -1736,6 +1739,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 qsize_t rsv_space = 0;
 struct dquot *transfer_from[MAXQUOTAS] = {};
 int cnt, ret = 0;
+char is_valid[MAXQUOTAS] = {};
 char warntype_to[MAXQUOTAS];
 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
 
@@ -1757,8 +1761,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 space = cur_space + rsv_space;
 /* Build the transfer_from list and check the limits */
 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+/*
+* Skip changes for same uid or gid or for turned off quota-type.
+*/
 if (!transfer_to[cnt])
 continue;
+/* Avoid races with quotaoff() */
+if (!sb_has_quota_active(inode->i_sb, cnt))
+continue;
+is_valid[cnt] = 1;
 transfer_from[cnt] = inode->i_dquot[cnt];
 ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
 if (ret)
@@ -1772,12 +1783,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 * Finally perform the needed transfer from transfer_from to transfer_to
 */
 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-/*
-* Skip changes for same uid or gid or for turned off quota-type.
-*/
-if (!transfer_to[cnt])
+if (!is_valid[cnt])
 continue;
-
 /* Due to IO error we might not have transfer_from[] structure */
 if (transfer_from[cnt]) {
 warntype_from_inodes[cnt] =
@@ -1801,18 +1808,19 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 
 mark_all_dquot_dirty(transfer_from);
 mark_all_dquot_dirty(transfer_to);
-/* Pass back references to put */
-for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-transfer_to[cnt] = transfer_from[cnt];
-warn:
 flush_warnings(transfer_to, warntype_to);
 flush_warnings(transfer_from, warntype_from_inodes);
 flush_warnings(transfer_from, warntype_from_space);
-return ret;
+/* Pass back references to put */
+for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+if (is_valid[cnt])
+transfer_to[cnt] = transfer_from[cnt];
+return 0;
 over_quota:
 spin_unlock(&dq_data_lock);
 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-goto warn;
+flush_warnings(transfer_to, warntype_to);
+return ret;
 }
 EXPORT_SYMBOL(__dquot_transfer);
 
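The dquot.c hunks carry two of the listed fixes: warnings are now flushed before the handed-back references are overwritten, and an is_valid[] array records, in the first pass, which quota types actually take part in the transfer, so the later passes reuse that decision instead of re-testing state that quotaoff() may have changed in between. A small userspace sketch of that "decide once, reuse the recorded decision" pattern, with hypothetical names, is:

#include <stdbool.h>
#include <stdio.h>

#define NTYPES 2

/* Hypothetical per-type state that another path may switch off. */
static bool type_active[NTYPES] = { true, false };
static int usage[NTYPES];

static void transfer(int delta)
{
	bool is_valid[NTYPES] = { false };
	int i;

	/* first pass: decide which slots take part and remember it */
	for (i = 0; i < NTYPES; i++)
		if (type_active[i])
			is_valid[i] = true;

	/* later passes consult only the recorded decision, never
	 * type_active[] again, so every pass agrees on the same set */
	for (i = 0; i < NTYPES; i++)
		if (is_valid[i])
			usage[i] += delta;
}

int main(void)
{
	transfer(1);
	printf("usage: %d %d\n", usage[0], usage[1]);
	return 0;
}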
@@ -22,6 +22,7 @@ config XFS_FS
 config XFS_QUOTA
 bool "XFS Quota support"
 depends on XFS_FS
+select QUOTACTL
 help
 If you say Y here, you will be able to set limits for disk usage on
 a per user and/or a per group basis under XFS. XFS considers quota