Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: don't warn in btrfs_add_orphan
  Btrfs: fix free space cache when there are pinned extents and clusters V2
  Btrfs: Fix uninitialized root flags for subvolumes
  btrfs: clear __GFP_FS flag in the space cache inode
  Btrfs: fix memory leak in start_transaction()
  Btrfs: fix memory leak in btrfs_ioctl_start_sync()
  Btrfs: fix subvol_sem leak in btrfs_rename()
  Btrfs: Fix oops for defrag with compression turned on
  Btrfs: fix /proc/mounts info.
  Btrfs: fix compiler warning in file.c
commit 884b8267d5
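Among the merged fixes, "Btrfs: Fix uninitialized root flags for subvolumes" works around old subvolumes whose root_item->flags and root_item->byte_limit were never initialized by stealing a bit from the root item's embedded inode flags, as the new btrfs_check_and_init_root_item() helper in the diff below shows. What follows is a minimal userspace sketch of that bit-stealing pattern only; the struct and helper are simplified stand-ins (endianness conversion omitted), not the kernel's btrfs_root_item.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the on-disk structures; not the kernel types. */
#define INODE_ROOT_ITEM_INIT (1ULL << 31)

struct fake_inode_item {
	uint64_t flags;
};

struct fake_root_item {
	struct fake_inode_item inode;
	uint64_t flags;
	uint64_t byte_limit;
};

/*
 * If the "initialized" bit was never set, flags and byte_limit may hold
 * garbage from an old filesystem, so zero them and record the bit.
 */
static void check_and_init_root_item(struct fake_root_item *item)
{
	if (!(item->inode.flags & INODE_ROOT_ITEM_INIT)) {
		item->inode.flags |= INODE_ROOT_ITEM_INIT;
		item->flags = 0;
		item->byte_limit = 0;
	}
}

int main(void)
{
	/* Pretend this root item came from an old filesystem: stale fields. */
	struct fake_root_item item = {
		.inode = { .flags = 0 },
		.flags = 0xdeadbeef,
		.byte_limit = 42,
	};

	check_and_init_root_item(&item);
	printf("flags=%llu byte_limit=%llu\n",
	       (unsigned long long)item.flags,
	       (unsigned long long)item.byte_limit);
	return 0;
}

In the patch itself the helper is invoked when a root is read in (btrfs_read_fs_root_no_radix) and when a snapshot's root item is copied (create_pending_snapshot), as the hunks below show.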
@@ -1284,6 +1284,8 @@ struct btrfs_root {
 #define BTRFS_INODE_DIRSYNC (1 << 10)
 #define BTRFS_INODE_COMPRESS (1 << 11)
 
+#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
+
 /* some macros to generate set/get funcs for the struct fields. This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
@@ -2359,6 +2361,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
 int btrfs_set_root_node(struct btrfs_root_item *item,
 			struct extent_buffer *node);
+void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+
 /* dir-item.c */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, const char *name,
@@ -1275,8 +1275,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
 	root->commit_root = btrfs_root_node(root);
 	BUG_ON(!root->node);
 out:
-	if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
+	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
 		root->ref_cows = 1;
+		btrfs_check_and_init_root_item(&root->root_item);
+	}
 
 	return root;
 }
@@ -906,7 +906,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	unsigned long last_index;
 	size_t num_written = 0;
 	int nrptrs;
-	int ret;
+	int ret = 0;
 
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
@@ -24,6 +24,7 @@
 #include "free-space-cache.h"
 #include "transaction.h"
 #include "disk-io.h"
+#include "extent_io.h"
 
 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
@@ -81,6 +82,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 		return ERR_PTR(-ENOENT);
 	}
 
+	inode->i_mapping->flags &= ~__GFP_FS;
+
 	spin_lock(&block_group->lock);
 	if (!root->fs_info->closing) {
 		block_group->inode = igrab(inode);
@@ -222,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
+	u64 used = btrfs_block_group_used(&block_group->item);
 	u32 cur_crc = ~(u32)0;
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
@@ -467,6 +471,17 @@ next:
 		index++;
 	}
 
+	spin_lock(&block_group->tree_lock);
+	if (block_group->free_space != (block_group->key.offset - used -
+					block_group->bytes_super)) {
+		spin_unlock(&block_group->tree_lock);
+		printk(KERN_ERR "block group %llu has an wrong amount of free "
+			"space\n", block_group->key.objectid);
+		ret = 0;
+		goto free_cache;
+	}
+	spin_unlock(&block_group->tree_lock);
+
 	ret = 1;
 out:
 	kfree(checksums);
@@ -495,8 +510,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	struct list_head *pos, *n;
 	struct page *page;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_free_cluster *cluster = NULL;
+	struct extent_io_tree *unpin = NULL;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
+	u64 start, end, len;
 	u64 bytes = 0;
 	u32 *crc, *checksums;
 	pgoff_t index = 0, last_index = 0;
@@ -505,6 +523,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	int entries = 0;
 	int bitmaps = 0;
 	int ret = 0;
+	bool next_page = false;
 
 	root = root->fs_info->tree_root;
 
@@ -551,6 +570,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 */
 	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
 
+	/* Get the cluster for this block_group if it exists */
+	if (!list_empty(&block_group->cluster_list))
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+
+	/*
+	 * We shouldn't have switched the pinned extents yet so this is the
+	 * right one
+	 */
+	unpin = root->fs_info->pinned_extents;
+
 	/*
 	 * Lock all pages first so we can lock the extent safely.
 	 *
@@ -580,6 +611,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
+	/*
+	 * When searching for pinned extents, we need to start at our start
+	 * offset.
+	 */
+	start = block_group->key.objectid;
+
 	/* Write out the extent entries */
 	do {
 		struct btrfs_free_space_entry *entry;
@@ -587,6 +624,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		unsigned long offset = 0;
 		unsigned long start_offset = 0;
 
+		next_page = false;
+
 		if (index == 0) {
 			start_offset = first_page_offset;
 			offset = start_offset;
@@ -598,7 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 			entry = addr + start_offset;
 
 		memset(addr, 0, PAGE_CACHE_SIZE);
-		while (1) {
+		while (node && !next_page) {
 			struct btrfs_free_space *e;
 
 			e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -614,12 +653,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 				entry->type = BTRFS_FREE_SPACE_EXTENT;
 			}
 			node = rb_next(node);
-			if (!node)
-				break;
+			if (!node && cluster) {
+				node = rb_first(&cluster->root);
+				cluster = NULL;
+			}
 			offset += sizeof(struct btrfs_free_space_entry);
 			if (offset + sizeof(struct btrfs_free_space_entry) >=
 			    PAGE_CACHE_SIZE)
+				next_page = true;
+			entry++;
+		}
+
+		/*
+		 * We want to add any pinned extents to our free space cache
+		 * so we don't leak the space
+		 */
+		while (!next_page && (start < block_group->key.objectid +
+				      block_group->key.offset)) {
+			ret = find_first_extent_bit(unpin, start, &start, &end,
+						    EXTENT_DIRTY);
+			if (ret) {
+				ret = 0;
 				break;
+			}
+
+			/* This pinned extent is out of our range */
+			if (start >= block_group->key.objectid +
+			    block_group->key.offset)
+				break;
+
+			len = block_group->key.objectid +
+			      block_group->key.offset - start;
+			len = min(len, end + 1 - start);
+
+			entries++;
+			entry->offset = cpu_to_le64(start);
+			entry->bytes = cpu_to_le64(len);
+			entry->type = BTRFS_FREE_SPACE_EXTENT;
+
+			start = end + 1;
+			offset += sizeof(struct btrfs_free_space_entry);
+			if (offset + sizeof(struct btrfs_free_space_entry) >=
+			    PAGE_CACHE_SIZE)
+				next_page = true;
 			entry++;
 		}
 		*crc = ~(u32)0;
@@ -650,7 +726,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		page_cache_release(page);
 
 		index++;
-	} while (node);
+	} while (node || next_page);
 
 	/* Write out the bitmaps */
 	list_for_each_safe(pos, n, &bitmap_list) {
@@ -112,6 +112,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, struct inode *inode,
 				u64 start, size_t size, size_t compressed_size,
+				int compress_type,
 				struct page **compressed_pages)
 {
 	struct btrfs_key key;
@@ -126,12 +127,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 	size_t cur_size = size;
 	size_t datasize;
 	unsigned long offset;
-	int compress_type = BTRFS_COMPRESS_NONE;
 
-	if (compressed_size && compressed_pages) {
-		compress_type = root->fs_info->compress_type;
+	if (compressed_size && compressed_pages)
 		cur_size = compressed_size;
-	}
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -221,7 +219,7 @@ fail:
 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct inode *inode, u64 start, u64 end,
-				 size_t compressed_size,
+				 size_t compressed_size, int compress_type,
 				 struct page **compressed_pages)
 {
 	u64 isize = i_size_read(inode);
@@ -254,7 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	inline_len = min_t(u64, isize, actual_end);
 	ret = insert_inline_extent(trans, root, inode, start,
 				   inline_len, compressed_size,
-				   compressed_pages);
+				   compress_type, compressed_pages);
 	BUG_ON(ret);
 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
@@ -433,12 +431,13 @@ again:
 		 * to make an uncompressed inline extent.
 		 */
 		ret = cow_file_range_inline(trans, root, inode,
-					    start, end, 0, NULL);
+					    start, end, 0, 0, NULL);
 	} else {
 		/* try making a compressed inline extent */
 		ret = cow_file_range_inline(trans, root, inode,
 					    start, end,
-					    total_compressed, pages);
+					    total_compressed,
+					    compress_type, pages);
 	}
 	if (ret == 0) {
 		/*
@@ -792,7 +791,7 @@ static noinline int cow_file_range(struct inode *inode,
 	if (start == 0) {
 		/* lets try to make an inline extent */
 		ret = cow_file_range_inline(trans, root, inode,
-					    start, end, 0, NULL);
+					    start, end, 0, 0, NULL);
 		if (ret == 0) {
 			extent_clear_unlock_delalloc(inode,
 				     &BTRFS_I(inode)->io_tree,
@@ -2222,8 +2221,6 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 		insert = 1;
 #endif
 		insert = 1;
-	} else {
-		WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
 	}
 
 	if (!BTRFS_I(inode)->orphan_meta_reserved) {
@@ -2537,8 +2534,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
 
 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
-	if (location.objectid == BTRFS_FREE_SPACE_OBJECTID)
-		inode->i_mapping->flags &= ~__GFP_FS;
 
 	/*
 	 * try to precache a NULL acl entry for files that don't have
@@ -6960,8 +6955,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * should cover the worst case number of items we'll modify.
 	 */
 	trans = btrfs_start_transaction(root, 20);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out_notrans;
+	}
 
 	btrfs_set_trans_block_group(trans, new_dir);
 
@@ -7061,7 +7058,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	}
 out_fail:
 	btrfs_end_transaction_throttle(trans, root);
-
+out_notrans:
 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&root->fs_info->subvol_sem);
 
@@ -373,6 +373,10 @@ static noinline int create_subvol(struct btrfs_root *root,
 	inode_item->nbytes = cpu_to_le64(root->leafsize);
 	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
 
+	root_item.flags = 0;
+	root_item.byte_limit = 0;
+	inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT);
+
 	btrfs_set_root_bytenr(&root_item, leaf->start);
 	btrfs_set_root_generation(&root_item, trans->transid);
 	btrfs_set_root_level(&root_item, 0);
@@ -2436,8 +2440,10 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
 		return PTR_ERR(trans);
 	transid = trans->transid;
 	ret = btrfs_commit_transaction_async(trans, root, 0);
-	if (ret)
+	if (ret) {
+		btrfs_end_transaction(trans, root);
 		return ret;
+	}
 
 	if (argp)
 		if (copy_to_user(argp, &transid, sizeof(transid)))
@@ -473,3 +473,21 @@ again:
 	btrfs_free_path(path);
 	return 0;
 }
+
+/*
+ * Old btrfs forgets to init root_item->flags and root_item->byte_limit
+ * for subvolumes. To work around this problem, we steal a bit from
+ * root_item->inode_item->flags, and use it to indicate if those fields
+ * have been properly initialized.
+ */
+void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
+{
+	u64 inode_flags = le64_to_cpu(root_item->inode.flags);
+
+	if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
+		inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
+		root_item->inode.flags = cpu_to_le64(inode_flags);
+		root_item->flags = 0;
+		root_item->byte_limit = 0;
+	}
+}
@@ -644,6 +644,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 {
 	struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
 	struct btrfs_fs_info *info = root->fs_info;
+	char *compress_type;
 
 	if (btrfs_test_opt(root, DEGRADED))
 		seq_puts(seq, ",degraded");
@@ -662,8 +663,16 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 	if (info->thread_pool_size != min_t(unsigned long,
 					    num_online_cpus() + 2, 8))
 		seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
-	if (btrfs_test_opt(root, COMPRESS))
-		seq_puts(seq, ",compress");
+	if (btrfs_test_opt(root, COMPRESS)) {
+		if (info->compress_type == BTRFS_COMPRESS_ZLIB)
+			compress_type = "zlib";
+		else
+			compress_type = "lzo";
+		if (btrfs_test_opt(root, FORCE_COMPRESS))
+			seq_printf(seq, ",compress-force=%s", compress_type);
+		else
+			seq_printf(seq, ",compress=%s", compress_type);
+	}
 	if (btrfs_test_opt(root, NOSSD))
 		seq_puts(seq, ",nossd");
 	if (btrfs_test_opt(root, SSD_SPREAD))
@@ -678,6 +687,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 		seq_puts(seq, ",discard");
 	if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
 		seq_puts(seq, ",noacl");
+	if (btrfs_test_opt(root, SPACE_CACHE))
+		seq_puts(seq, ",space_cache");
+	if (btrfs_test_opt(root, CLEAR_CACHE))
+		seq_puts(seq, ",clear_cache");
+	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+		seq_puts(seq, ",user_subvol_rm_allowed");
 	return 0;
 }
 
@@ -197,6 +197,7 @@ again:
 
 	ret = join_transaction(root);
 	if (ret < 0) {
+		kmem_cache_free(btrfs_trans_handle_cachep, h);
 		if (type != TRANS_JOIN_NOLOCK)
 			mutex_unlock(&root->fs_info->trans_mutex);
 		return ERR_PTR(ret);
@@ -975,6 +976,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	record_root_in_trans(trans, root);
 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
+	btrfs_check_and_init_root_item(new_root_item);
 
 	root_flags = btrfs_root_flags(new_root_item);
 	if (pending->readonly)