-----BEGIN PGP SIGNATURE-----
iQEzBAABCAAdFiEEq1nRK9aeMoq1VSgcnJ2qBz9kQNkFAl0mED8ACgkQnJ2qBz9k
QNnZ9wgAmC+eP8m6jB38HM7gZ+fWGEX3+FvnjbMbnXmNJTsnYWYC1VIRZhwKZb4b
42OGinfLq5tZMY/whrFBdB/c4UbVhAMhd1aFTpM2n5A6FR12YZxaLZEC+MLy3T7z
VU8m4uWDn80OvlUByo4Bylh+Icj78m8tLgj8SHSWxoh/DlGVKSLj9OKufV9Laens
YxubcUxE5sEEu8IVQen84283oJoizmeQf+f9yogAKIaskDLBzxqBIZwEACEUUchz
kEWRiHwS+Ou8EUHuwXqdKKksQgoLHEdxz2szYK1xSQ1wPmxMKPG5DqbQZv2QUBD0
Ek5T5YP4Tmph4s14n+jKDhakAJcqIQ==
=HWaa
-----END PGP SIGNATURE-----

Merge tag 'for_v5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf and quota updates from Jan Kara:

 - some ext2 fixes and cleanups

 - a fix of udf bug when extending files

 - a fix of quota Q_XGETQSTAT[V] handling

* tag 'for_v5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  udf: Fix incorrect final NOT_ALLOCATED (hole) extent length
  ext2: Use kmemdup rather than duplicating its implementation
  quota: honor quota type in Q_XGETQSTAT[V] calls
  ext2: Always brelse bh on failure in ext2_iget()
  ext2: add missing brelse() in ext2_iget()
  ext2: Fix a typo in ext2_getattr argument
  ext2: fix a typo in comment
  ext2: add missing brelse() in ext2_new_inode()
  ext2: optimize ext2_xattr_get()
  ext2: introduce new helper for xattr entry comparison
  ext2: merge xattr next entry check to ext2_xattr_entry_valid()
  ext2: code cleanup for ext2_preread_inode()
  ext2: code cleanup by using test_opt() and clear_opt()
  doc: ext2: update description of quota options for ext2
  ext2: Strengthen xattr block checks
  ext2: Merge loops in ext2_xattr_set()
  ext2: introduce helper for xattr entry validation
  ext2: introduce helper for xattr header validation
  quota: add dqi_dirty_list description to comment of Dquot List Management
commit 682f7c5c46
@@ -57,7 +57,13 @@ noacl			Don't support POSIX ACLs.
 nobh				Do not attach buffer_heads to file pagecache.
 
-grpquota,noquota,quota,usrquota	Quota options are silently ignored by ext2.
+quota, usrquota			Enable user disk quota support
+				(requires CONFIG_QUOTA).
+
+grpquota			Enable group disk quota support
+				(requires CONFIG_QUOTA).
+
+noquota				option is silently ignored by ext2.
 
 
 Specification
@@ -1197,7 +1197,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
 
 /*
  * Returns 1 if the passed-in block region is valid; 0 if some part overlaps
- * with filesystem metadata blocksi.
+ * with filesystem metadata blocks.
  */
 int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
 			  unsigned int count)
@@ -1212,7 +1212,6 @@ int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
 	    (start_blk + count >= sbi->s_sb_block))
 		return 0;
 
-
 	return 1;
 }
 
@@ -172,9 +172,7 @@ static void ext2_preread_inode(struct inode *inode)
 	struct backing_dev_info *bdi;
 
 	bdi = inode_to_bdi(inode);
-	if (bdi_read_congested(bdi))
-		return;
-	if (bdi_write_congested(bdi))
+	if (bdi_rw_congested(bdi))
 		return;
 
 	block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
@@ -511,6 +509,7 @@ repeat_in_this_group:
 	/*
 	 * Scanned all blockgroups.
 	 */
+	brelse(bitmap_bh);
 	err = -ENOSPC;
 	goto fail;
 got:
@@ -1400,7 +1400,7 @@ void ext2_set_file_ops(struct inode *inode)
 struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
 {
 	struct ext2_inode_info *ei;
-	struct buffer_head * bh;
+	struct buffer_head * bh = NULL;
 	struct ext2_inode *raw_inode;
 	struct inode *inode;
 	long ret = -EIO;
@@ -1446,7 +1446,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
 	 */
 	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
 		/* this inode is deleted */
-		brelse (bh);
 		ret = -ESTALE;
 		goto bad_inode;
 	}
@@ -1463,7 +1462,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
 	    !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
 		ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
 			   ei->i_file_acl);
-		brelse(bh);
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
 	}
@@ -1526,6 +1524,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
 	return inode;
 	
 bad_inode:
+	brelse(bh);
 	iget_failed(inode);
 	return ERR_PTR(ret);
 }
@@ -1640,7 +1639,7 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
 }
 
 int ext2_getattr(const struct path *path, struct kstat *stat,
-		u32 request_mask, unsigned int query_falgs)
+		u32 request_mask, unsigned int query_flags)
 {
 	struct inode *inode = d_inode(path->dentry);
 	struct ext2_inode_info *ei = EXT2_I(inode);
@@ -303,16 +303,16 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
 	if (test_opt(sb, NOBH))
 		seq_puts(seq, ",nobh");
 
-	if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
+	if (test_opt(sb, USRQUOTA))
 		seq_puts(seq, ",usrquota");
 
-	if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
+	if (test_opt(sb, GRPQUOTA))
 		seq_puts(seq, ",grpquota");
 
-	if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
+	if (test_opt(sb, XIP))
 		seq_puts(seq, ",xip");
 
-	if (sbi->s_mount_opt & EXT2_MOUNT_DAX)
+	if (test_opt(sb, DAX))
 		seq_puts(seq, ",dax");
 
 	if (!test_opt(sb, RESERVATION))
@@ -935,8 +935,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_resgid = opts.s_resgid;
 
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
-		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
-		 SB_POSIXACL : 0);
+		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 	sb->s_iflags |= SB_I_CGROUPWB;
 
 	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
@@ -967,11 +966,11 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 
 	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
-	if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
+	if (test_opt(sb, DAX)) {
 		if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
 			ext2_msg(sb, KERN_ERR,
 				"DAX unsupported by block device. Turning off DAX.");
-			sbi->s_mount_opt &= ~EXT2_MOUNT_DAX;
+			clear_opt(sbi->s_mount_opt, DAX);
 		}
 	}
 
@@ -1404,7 +1403,7 @@ out_set:
 	sbi->s_resuid = new_opts.s_resuid;
 	sbi->s_resgid = new_opts.s_resgid;
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
-		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
+		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 	spin_unlock(&sbi->s_lock);
 
 	return 0;
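Note: the cleanups above replace open-coded tests of sbi->s_mount_opt with the ext2
mount-option helpers. For orientation, the helpers are roughly the following macros
(paraphrased from fs/ext2/ext2.h; exact definitions may differ between kernel versions):

    /* Approximate sketch of the ext2 mount-option helpers. */
    #define set_opt(o, opt)         ((o) |= EXT2_MOUNT_##opt)
    #define clear_opt(o, opt)       ((o) &= ~EXT2_MOUNT_##opt)
    #define test_opt(sb, opt)       (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_##opt)

so test_opt(sb, USRQUOTA) and (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA) are the same
check, spelled through one helper.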
fs/ext2/xattr.c
@@ -134,6 +134,53 @@ ext2_xattr_handler(int name_index)
 	return handler;
 }
 
+static bool
+ext2_xattr_header_valid(struct ext2_xattr_header *header)
+{
+	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
+	    header->h_blocks != cpu_to_le32(1))
+		return false;
+
+	return true;
+}
+
+static bool
+ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
+		       char *end, size_t end_offs)
+{
+	struct ext2_xattr_entry *next;
+	size_t size;
+
+	next = EXT2_XATTR_NEXT(entry);
+	if ((char *)next >= end)
+		return false;
+
+	if (entry->e_value_block != 0)
+		return false;
+
+	size = le32_to_cpu(entry->e_value_size);
+	if (size > end_offs ||
+	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
+		return false;
+
+	return true;
+}
+
+static int
+ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
+		     struct ext2_xattr_entry *entry)
+{
+	int cmp;
+
+	cmp = name_index - entry->e_name_index;
+	if (!cmp)
+		cmp = name_len - entry->e_name_len;
+	if (!cmp)
+		cmp = memcmp(name, entry->e_name, name_len);
+
+	return cmp;
+}
+
 /*
  * ext2_xattr_get()
  *
@@ -152,7 +199,7 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name,
 	struct ext2_xattr_entry *entry;
 	size_t name_len, size;
 	char *end;
-	int error;
+	int error, not_found;
 	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
 
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
@@ -176,9 +223,9 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name,
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
 	end = bh->b_data + bh->b_size;
-	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
-	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
-bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
+	if (!ext2_xattr_header_valid(HDR(bh))) {
+bad_block:
+		ext2_error(inode->i_sb, "ext2_xattr_get",
 			"inode %ld: bad block %d", inode->i_ino,
 			EXT2_I(inode)->i_file_acl);
 		error = -EIO;
@@ -188,29 +235,25 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
 	/* find named attribute */
 	entry = FIRST_ENTRY(bh);
 	while (!IS_LAST_ENTRY(entry)) {
-		struct ext2_xattr_entry *next =
-			EXT2_XATTR_NEXT(entry);
-		if ((char *)next >= end)
+		if (!ext2_xattr_entry_valid(entry, end,
+		    inode->i_sb->s_blocksize))
 			goto bad_block;
-		if (name_index == entry->e_name_index &&
-		    name_len == entry->e_name_len &&
-		    memcmp(name, entry->e_name, name_len) == 0)
+
+		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
+						 entry);
+		if (!not_found)
 			goto found;
-		entry = next;
+		if (not_found < 0)
+			break;
+
+		entry = EXT2_XATTR_NEXT(entry);
 	}
 	if (ext2_xattr_cache_insert(ea_block_cache, bh))
 		ea_idebug(inode, "cache insert failed");
 	error = -ENODATA;
 	goto cleanup;
 found:
-	/* check the buffer size */
-	if (entry->e_value_block != 0)
-		goto bad_block;
 	size = le32_to_cpu(entry->e_value_size);
-	if (size > inode->i_sb->s_blocksize ||
-	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
-		goto bad_block;
-
 	if (ext2_xattr_cache_insert(ea_block_cache, bh))
 		ea_idebug(inode, "cache insert failed");
 	if (buffer) {
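Note: the rewritten lookup in ext2_xattr_get() relies on xattr entries being kept sorted
by (name_index, name_len, name), so the signed result of ext2_xattr_cmp_entry() lets the
scan stop as soon as it has walked past the slot where the attribute would live. A small
stand-alone sketch of the same pattern (illustrative only, not kernel code; the names and
table contents are made up):

    #include <stdio.h>
    #include <string.h>

    struct entry {
            int name_index;       /* compared first, like e_name_index */
            const char *name;     /* then by length, then by bytes */
    };

    /* Three-way compare in the spirit of ext2_xattr_cmp_entry(): a negative
     * result means the wanted key sorts before this entry, so a scan over a
     * sorted table can stop early. */
    static int cmp_entry(int name_index, const char *name, const struct entry *e)
    {
            int cmp = name_index - e->name_index;

            if (!cmp)
                    cmp = (int)strlen(name) - (int)strlen(e->name);
            if (!cmp)
                    cmp = memcmp(name, e->name, strlen(name));
            return cmp;
    }

    static const struct entry *find(const struct entry *tab, int n,
                                    int name_index, const char *name)
    {
            for (int i = 0; i < n; i++) {
                    int cmp = cmp_entry(name_index, name, &tab[i]);

                    if (!cmp)
                            return &tab[i];     /* exact match */
                    if (cmp < 0)
                            break;              /* walked past the slot */
            }
            return NULL;
    }

    int main(void)
    {
            static const struct entry tab[] = {
                    { 1, "acl" }, { 1, "selinux" }, { 2, "comment" },
            };

            printf("%s\n", find(tab, 3, 1, "selinux") ? "found" : "not found");
            return 0;
    }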
@@ -266,9 +309,9 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
 	end = bh->b_data + bh->b_size;
-	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
-	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
-bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
+	if (!ext2_xattr_header_valid(HDR(bh))) {
+bad_block:
+		ext2_error(inode->i_sb, "ext2_xattr_list",
 			"inode %ld: bad block %d", inode->i_ino,
 			EXT2_I(inode)->i_file_acl);
 		error = -EIO;
@@ -278,11 +321,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
 	/* check the on-disk data structure */
 	entry = FIRST_ENTRY(bh);
 	while (!IS_LAST_ENTRY(entry)) {
-		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);
-
-		if ((char *)next >= end)
+		if (!ext2_xattr_entry_valid(entry, end,
+		    inode->i_sb->s_blocksize))
 			goto bad_block;
-		entry = next;
+		entry = EXT2_XATTR_NEXT(entry);
 	}
 	if (ext2_xattr_cache_insert(ea_block_cache, bh))
 		ea_idebug(inode, "cache insert failed");
@@ -367,7 +409,7 @@ ext2_xattr_set(struct inode *inode, int name_index, const char *name,
 	struct super_block *sb = inode->i_sb;
 	struct buffer_head *bh = NULL;
 	struct ext2_xattr_header *header = NULL;
-	struct ext2_xattr_entry *here, *last;
+	struct ext2_xattr_entry *here = NULL, *last = NULL;
 	size_t name_len, free, min_offs = sb->s_blocksize;
 	int not_found = 1, error;
 	char *end;
@@ -406,47 +448,39 @@ ext2_xattr_set(struct inode *inode, int name_index, const char *name,
 			   le32_to_cpu(HDR(bh)->h_refcount));
 		header = HDR(bh);
 		end = bh->b_data + bh->b_size;
-		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
-		    header->h_blocks != cpu_to_le32(1)) {
-bad_block:		ext2_error(sb, "ext2_xattr_set",
+		if (!ext2_xattr_header_valid(header)) {
+bad_block:
+			ext2_error(sb, "ext2_xattr_set",
 				   "inode %ld: bad block %d", inode->i_ino,
 				   EXT2_I(inode)->i_file_acl);
 			error = -EIO;
 			goto cleanup;
 		}
-		/* Find the named attribute. */
-		here = FIRST_ENTRY(bh);
-		while (!IS_LAST_ENTRY(here)) {
-			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
-			if ((char *)next >= end)
-				goto bad_block;
-			if (!here->e_value_block && here->e_value_size) {
-				size_t offs = le16_to_cpu(here->e_value_offs);
-				if (offs < min_offs)
-					min_offs = offs;
-			}
-			not_found = name_index - here->e_name_index;
-			if (!not_found)
-				not_found = name_len - here->e_name_len;
-			if (!not_found)
-				not_found = memcmp(name, here->e_name,name_len);
-			if (not_found <= 0)
-				break;
-			here = next;
-		}
-		last = here;
-		/* We still need to compute min_offs and last. */
+		/*
+		 * Find the named attribute. If not found, 'here' will point
+		 * to entry where the new attribute should be inserted to
+		 * maintain sorting.
+		 */
+		last = FIRST_ENTRY(bh);
 		while (!IS_LAST_ENTRY(last)) {
-			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
-			if ((char *)next >= end)
+			if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize))
 				goto bad_block;
-			if (!last->e_value_block && last->e_value_size) {
+			if (last->e_value_size) {
 				size_t offs = le16_to_cpu(last->e_value_offs);
 				if (offs < min_offs)
 					min_offs = offs;
 			}
-			last = next;
+			if (not_found > 0) {
+				not_found = ext2_xattr_cmp_entry(name_index,
+								 name_len,
+								 name, last);
+				if (not_found <= 0)
+					here = last;
+			}
+			last = EXT2_XATTR_NEXT(last);
 		}
+		if (not_found > 0)
+			here = last;
 
 		/* Check whether we have enough space left. */
 		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
@@ -454,7 +488,6 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 		/* We will use a new extended attribute block. */
 		free = sb->s_blocksize -
 			sizeof(struct ext2_xattr_header) - sizeof(__u32);
-		here = last = NULL;  /* avoid gcc uninitialized warning. */
 	}
 
 	if (not_found) {
@@ -470,14 +503,7 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 		error = -EEXIST;
 		if (flags & XATTR_CREATE)
 			goto cleanup;
-		if (!here->e_value_block && here->e_value_size) {
-			size_t size = le32_to_cpu(here->e_value_size);
-
-			if (le16_to_cpu(here->e_value_offs) + size >
-			    sb->s_blocksize || size > sb->s_blocksize)
-				goto bad_block;
-			free += EXT2_XATTR_SIZE(size);
-		}
+		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
 		free += EXT2_XATTR_LEN(name_len);
 	}
 	error = -ENOSPC;
@@ -506,11 +532,10 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 
 			unlock_buffer(bh);
 			ea_bdebug(bh, "cloning");
-			header = kmalloc(bh->b_size, GFP_KERNEL);
+			header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
 			error = -ENOMEM;
 			if (header == NULL)
 				goto cleanup;
-			memcpy(header, HDR(bh), bh->b_size);
 			header->h_refcount = cpu_to_le32(1);
 
 			offset = (char *)here - bh->b_data;
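Note: kmemdup() is allocate-and-copy in one call; the removed kmalloc()+memcpy() pair did
the same thing in two steps. Roughly (a simplified sketch of the generic helper, see
mm/util.c for the real implementation):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch: what kmemdup(src, len, gfp) boils down to. */
    static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
    {
            void *p = kmalloc(len, gfp);

            if (p)
                    memcpy(p, src, len);
            return p;
    }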
@@ -542,7 +567,7 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 		here->e_name_len = name_len;
 		memcpy(here->e_name, name, name_len);
 	} else {
-		if (!here->e_value_block && here->e_value_size) {
+		if (here->e_value_size) {
 			char *first_val = (char *)header + min_offs;
 			size_t offs = le16_to_cpu(here->e_value_offs);
 			char *val = (char *)header + offs;
@@ -569,7 +594,7 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 			last = ENTRY(header+1);
 			while (!IS_LAST_ENTRY(last)) {
 				size_t o = le16_to_cpu(last->e_value_offs);
-				if (!last->e_value_block && o < offs)
+				if (o < offs)
 					last->e_value_offs =
 						cpu_to_le16(o + size);
 				last = EXT2_XATTR_NEXT(last);
@@ -784,8 +809,7 @@ ext2_xattr_delete_inode(struct inode *inode)
 		goto cleanup;
 	}
 	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
-	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
-	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
+	if (!ext2_xattr_header_valid(HDR(bh))) {
 		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
 			   "inode %ld: bad block %d", inode->i_ino,
 			   EXT2_I(inode)->i_file_acl);
@@ -223,9 +223,9 @@ static void put_quota_format(struct quota_format_type *fmt)
 
 /*
  * Dquot List Management:
- * The quota code uses three lists for dquot management: the inuse_list,
- * free_dquots, and dquot_hash[] array. A single dquot structure may be
- * on all three lists, depending on its current state.
+ * The quota code uses four lists for dquot management: the inuse_list,
+ * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
+ * structure may be on some of those lists, depending on its current state.
  *
  * All dquots are placed to the end of inuse_list when first created, and this
  * list is used for invalidate operation, which must look at every dquot.
@@ -236,6 +236,11 @@ static void put_quota_format(struct quota_format_type *fmt)
  * dqstats.free_dquots gives the number of dquots on the list. When
  * dquot is invalidated it's completely released from memory.
  *
+ * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
+ * dirtied, and this list is searched when writing dirty dquots back to
+ * quota file. Note that some filesystems do dirty dquot tracking on their
+ * own (e.g. in a journal) and thus don't use dqi_dirty_list.
+ *
  * Dquots with a specific identity (device, type and id) are placed on
  * one of the dquot_hash[] hash chains. The provides an efficient search
  * mechanism to locate a specific dquot.
@@ -331,9 +331,9 @@ static int quota_state_to_flags(struct qc_state *state)
 	return flags;
 }
 
-static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
+static int quota_getstate(struct super_block *sb, int type,
+			  struct fs_quota_stat *fqs)
 {
-	int type;
 	struct qc_state state;
 	int ret;
 
@@ -349,14 +349,7 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
 	if (!fqs->qs_flags)
 		return -ENOSYS;
 	fqs->qs_incoredqs = state.s_incoredqs;
-	/*
-	 * GETXSTATE quotactl has space for just one set of time limits so
-	 * report them for the first enabled quota type
-	 */
-	for (type = 0; type < MAXQUOTAS; type++)
-		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
-			break;
-	BUG_ON(type == MAXQUOTAS);
+
 	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
 	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
 	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -391,22 +384,22 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
 	return 0;
 }
 
-static int quota_getxstate(struct super_block *sb, void __user *addr)
+static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
 {
 	struct fs_quota_stat fqs;
 	int ret;
 
 	if (!sb->s_qcop->get_state)
 		return -ENOSYS;
-	ret = quota_getstate(sb, &fqs);
+	ret = quota_getstate(sb, type, &fqs);
 	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
 		return -EFAULT;
 	return ret;
 }
 
-static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
+static int quota_getstatev(struct super_block *sb, int type,
+			   struct fs_quota_statv *fqs)
 {
-	int type;
 	struct qc_state state;
 	int ret;
 
@@ -422,14 +415,7 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
 	if (!fqs->qs_flags)
 		return -ENOSYS;
 	fqs->qs_incoredqs = state.s_incoredqs;
-	/*
-	 * GETXSTATV quotactl has space for just one set of time limits so
-	 * report them for the first enabled quota type
-	 */
-	for (type = 0; type < MAXQUOTAS; type++)
-		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
-			break;
-	BUG_ON(type == MAXQUOTAS);
+
 	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
 	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
 	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -455,7 +441,7 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
 	return 0;
 }
 
-static int quota_getxstatev(struct super_block *sb, void __user *addr)
+static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
 {
 	struct fs_quota_statv fqs;
 	int ret;
|
@ -474,7 +460,7 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
|
|||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
ret = quota_getstatev(sb, &fqs);
|
||||
ret = quota_getstatev(sb, type, &fqs);
|
||||
if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
|
||||
return -EFAULT;
|
||||
return ret;
|
||||
|
@@ -744,9 +730,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 	case Q_XQUOTARM:
 		return quota_rmxquota(sb, addr);
 	case Q_XGETQSTAT:
-		return quota_getxstate(sb, addr);
+		return quota_getxstate(sb, type, addr);
 	case Q_XGETQSTATV:
-		return quota_getxstatev(sb, addr);
+		return quota_getxstatev(sb, type, addr);
 	case Q_XSETQLIM:
 		return quota_setxquota(sb, type, id, addr);
 	case Q_XGETQUOTA:
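Note: with the quota change, the quota type encoded in the quotactl command now selects
whose state Q_XGETQSTAT[V] reports, instead of the kernel always picking the first
enabled type. A hypothetical userspace illustration (the device path and header choices
are assumptions for the sketch, not taken from this series):

    #include <stdio.h>
    #include <sys/quota.h>
    #include <linux/dqblk_xfs.h>

    int main(void)
    {
            struct fs_quota_stat fqs;

            /* Ask specifically for the group-quota state. */
            if (quotactl(QCMD(Q_XGETQSTAT, GRPQUOTA), "/dev/sda1", 0,
                         (void *)&fqs) != 0) {
                    perror("quotactl");
                    return 1;
            }
            printf("block grace time: %d\n", (int)fqs.qs_btimelimit);
            return 0;
    }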
@@ -470,13 +470,15 @@ static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
 	return NULL;
 }
 
-/* Extend the file by 'blocks' blocks, return the number of extents added */
+/* Extend the file with new blocks totaling 'new_block_bytes',
+ * return the number of extents added
+ */
 static int udf_do_extend_file(struct inode *inode,
 			      struct extent_position *last_pos,
 			      struct kernel_long_ad *last_ext,
-			      sector_t blocks)
+			      loff_t new_block_bytes)
 {
-	sector_t add;
+	uint32_t add;
 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
 	struct super_block *sb = inode->i_sb;
 	struct kernel_lb_addr prealloc_loc = {};
@@ -486,7 +488,7 @@ static int udf_do_extend_file(struct inode *inode,
 
 	/* The previous extent is fake and we should not extend by anything
 	 * - there's nothing to do... */
-	if (!blocks && fake)
+	if (!new_block_bytes && fake)
 		return 0;
 
 	iinfo = UDF_I(inode);
@@ -517,13 +519,12 @@ static int udf_do_extend_file(struct inode *inode,
 	/* Can we merge with the previous extent? */
 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
 	    EXT_NOT_RECORDED_NOT_ALLOCATED) {
-		add = ((1 << 30) - sb->s_blocksize -
-			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
-			sb->s_blocksize_bits;
-		if (add > blocks)
-			add = blocks;
-		blocks -= add;
-		last_ext->extLength += add << sb->s_blocksize_bits;
+		add = (1 << 30) - sb->s_blocksize -
+			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+		if (add > new_block_bytes)
+			add = new_block_bytes;
+		new_block_bytes -= add;
+		last_ext->extLength += add;
 	}
 
 	if (fake) {
@@ -544,28 +545,27 @@ static int udf_do_extend_file(struct inode *inode,
 	}
 
 	/* Managed to do everything necessary? */
-	if (!blocks)
+	if (!new_block_bytes)
 		goto out;
 
 	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
 	last_ext->extLocation.logicalBlockNum = 0;
 	last_ext->extLocation.partitionReferenceNum = 0;
-	add = (1 << (30-sb->s_blocksize_bits)) - 1;
-	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-		(add << sb->s_blocksize_bits);
+	add = (1 << 30) - sb->s_blocksize;
+	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
 
 	/* Create enough extents to cover the whole hole */
-	while (blocks > add) {
-		blocks -= add;
+	while (new_block_bytes > add) {
+		new_block_bytes -= add;
 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
 				   last_ext->extLength, 1);
 		if (err)
 			return err;
 		count++;
 	}
-	if (blocks) {
+	if (new_block_bytes) {
 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-			(blocks << sb->s_blocksize_bits);
+			new_block_bytes;
 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
 				   last_ext->extLength, 1);
 		if (err)
@@ -596,6 +596,24 @@ out:
 	return count;
 }
 
+/* Extend the final block of the file to final_block_len bytes */
+static void udf_do_extend_final_block(struct inode *inode,
+				      struct extent_position *last_pos,
+				      struct kernel_long_ad *last_ext,
+				      uint32_t final_block_len)
+{
+	struct super_block *sb = inode->i_sb;
+	uint32_t added_bytes;
+
+	added_bytes = final_block_len -
+		      (last_ext->extLength & (sb->s_blocksize - 1));
+	last_ext->extLength += added_bytes;
+	UDF_I(inode)->i_lenExtents += added_bytes;
+
+	udf_write_aext(inode, last_pos, &last_ext->extLocation,
+		       last_ext->extLength, 1);
+}
+
 static int udf_extend_file(struct inode *inode, loff_t newsize)
 {
@@ -605,10 +623,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
 	int8_t etype;
 	struct super_block *sb = inode->i_sb;
 	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+	unsigned long partial_final_block;
 	int adsize;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 	struct kernel_long_ad extent;
-	int err;
+	int err = 0;
+	int within_final_block;
 
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
 		adsize = sizeof(struct short_ad);
@@ -618,18 +638,8 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
 		BUG();
 
 	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+	within_final_block = (etype != -1);
 
-	/* File has extent covering the new size (could happen when extending
-	 * inside a block)? */
-	if (etype != -1)
-		return 0;
-	if (newsize & (sb->s_blocksize - 1))
-		offset++;
-	/* Extended file just to the boundary of the last file block? */
-	if (offset == 0)
-		return 0;
-
-	/* Truncate is extending the file by 'offset' blocks */
 	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
 	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
 		/* File has no extents at all or has empty last
@@ -643,7 +653,22 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
 				    &extent.extLength, 0);
 		extent.extLength |= etype << 30;
 	}
-	err = udf_do_extend_file(inode, &epos, &extent, offset);
+
+	partial_final_block = newsize & (sb->s_blocksize - 1);
+
+	/* File has extent covering the new size (could happen when extending
+	 * inside a block)?
+	 */
+	if (within_final_block) {
+		/* Extending file within the last file block */
+		udf_do_extend_final_block(inode, &epos, &extent,
+					  partial_final_block);
+	} else {
+		loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
+			     partial_final_block;
+		err = udf_do_extend_file(inode, &epos, &extent, add);
+	}
+
 	if (err < 0)
 		goto out;
 	err = 0;
@@ -745,6 +770,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
 	/* Are we beyond EOF? */
 	if (etype == -1) {
 		int ret;
+		loff_t hole_len;
 		isBeyondEOF = true;
 		if (count) {
 			if (c)
@@ -760,7 +786,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
 			startnum = (offset > 0);
 		}
 		/* Create extents for the hole between EOF and offset */
-		ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
+		hole_len = (loff_t)offset << inode->i_blkbits;
+		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
 		if (ret < 0) {
 			*err = ret;
 			newblock = 0;
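Note: the udf change passes the hole size to udf_do_extend_file() in bytes rather than
blocks, so the last NOT_RECORDED_NOT_ALLOCATED extent can end mid-block instead of being
rounded up to a whole block. A rough stand-alone sketch of the extent-splitting
arithmetic (assumed constants and output, not the kernel code itself):

    #include <stdio.h>
    #include <stdint.h>

    /* UDF extent lengths must stay below 2^30 bytes, so a large hole is split
     * into extents of at most (1 << 30) - block_size bytes each. */
    static void sketch_hole_extents(uint64_t hole_bytes, uint32_t block_size)
    {
            uint32_t max_ext = (1u << 30) - block_size;
            unsigned int count = 0;

            while (hole_bytes > max_ext) {
                    hole_bytes -= max_ext;
                    count++;        /* full-sized hole extent */
            }
            if (hole_bytes)
                    count++;        /* final extent, possibly a partial block */

            printf("%u extent(s), final one covers %llu bytes\n",
                   count, (unsigned long long)hole_bytes);
    }

    int main(void)
    {
            sketch_hole_extents((5ull << 30) + 1000, 2048);    /* ~5 GiB hole */
            return 0;
    }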