Merge branch 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "The core updates improve error handling (mostly related to bios),
  with the usual incremental work on the GFP_NOFS (mis)use removal,
  refactoring or cleanups. Except the two top patches, all have been in
  for-next for an extensive amount of time.

  User visible changes:

   - statx support

   - quota override tunable

   - improved compression thresholds

   - obsoleted mount option alloc_start

  Core updates:

   - bio-related updates:
       - faster bio cloning
       - no allocation failures
       - preallocated flush bios

   - more kvzalloc use, memalloc_nofs protections, GFP_NOFS updates

   - prep work for btree_inode removal

   - dir-item validation

   - qgroup fixes and updates

   - cleanups:
       - removed unused struct members, unused code, refactoring
       - argument refactoring (fs_info/root, caller -> callee sink)
       - SEARCH_TREE ioctl docs"

* 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (115 commits)
  btrfs: Remove false alert when fiemap range is smaller than on-disk extent
  btrfs: Don't clear SGID when inheriting ACLs
  btrfs: fix integer overflow in calc_reclaim_items_nr
  btrfs: scrub: fix target device intialization while setting up scrub context
  btrfs: qgroup: Fix qgroup reserved space underflow by only freeing reserved ranges
  btrfs: qgroup: Introduce extent changeset for qgroup reserve functions
  btrfs: qgroup: Fix qgroup reserved space underflow caused by buffered write and quotas being enabled
  btrfs: qgroup: Return actually freed bytes for qgroup release or free data
  btrfs: qgroup: Cleanup btrfs_qgroup_prepare_account_extents function
  btrfs: qgroup: Add quick exit for non-fs extents
  Btrfs: rework delayed ref total_bytes_pinned accounting
  Btrfs: return old and new total ref mods when adding delayed refs
  Btrfs: always account pinned bytes when dropping a tree block ref
  Btrfs: update total_bytes_pinned when pinning down extents
  Btrfs: make BUG_ON() in add_pinned_bytes() an ASSERT()
  Btrfs: make add_pinned_bytes() take an s64 num_bytes instead of u64
  btrfs: fix validation of XATTR_ITEM dir items
  btrfs: Verify dir_item in iterate_object_props
  btrfs: Check name_len before in btrfs_del_root_ref
  btrfs: Check name_len before reading btrfs_get_name
  ...
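Much of the GFP_NOFS work in this branch relies on the scoped-NOFS task flag
rather than threading GFP_NOFS through every allocation site; the
find_workspace() hunk in compression.c below uses it verbatim. A minimal
sketch of the pattern follows, where the function name is an illustrative
stand-in and not btrfs code:

    #include <linux/sched/mm.h>
    #include <linux/mm.h>

    static void *alloc_in_fs_context(size_t size)
    {
    	unsigned int nofs_flag;
    	void *ptr;

    	/*
    	 * Every allocation inside the save/restore window implicitly
    	 * behaves as GFP_NOFS, including vmalloc-based helpers that
    	 * take no explicit gfp mask.
    	 */
    	nofs_flag = memalloc_nofs_save();
    	ptr = kvmalloc(size, GFP_KERNEL);
    	memalloc_nofs_restore(nofs_flag);
    	return ptr;
    }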
commit 8c27cb3566
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
@@ -78,12 +78,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
-		if (acl) {
-			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (ret)
-				return ret;
-		}
-		ret = 0;
 		break;
 	case ACL_TYPE_DEFAULT:
 		if (!S_ISDIR(inode->i_mode))
@@ -119,6 +113,13 @@ out:
 
 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
+	int ret;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (ret)
+			return ret;
+	}
 	return __btrfs_set_acl(NULL, inode, acl, type);
 }
 
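The acl.c change above is the "Don't clear SGID when inheriting ACLs" fix:
__btrfs_set_acl() is reached both from btrfs_set_acl() (the setxattr path)
and directly when a newly created inode inherits its parent's default ACL,
and only the former should run posix_acl_update_mode(). A hypothetical
sketch of the inheritance path (modeled on btrfs_init_acl-style code, not
copied from this series) shows why the call had to move:

    /*
     * Hypothetical: bypasses btrfs_set_acl(), so after this series it
     * no longer runs posix_acl_update_mode() and cannot strip the SGID
     * bit that inode_init_owner() set on the new inode.
     */
    static int example_init_acl(struct btrfs_trans_handle *trans,
    			    struct inode *inode,
    			    struct posix_acl *default_acl)
    {
    	return __btrfs_set_acl(trans, inode, default_acl, ACL_TYPE_DEFAULT);
    }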
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
@@ -16,7 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -2305,7 +2305,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
 	size_t alloc_bytes;
 
 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
-	data = vmalloc(alloc_bytes);
+	data = kvmalloc(alloc_bytes, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
@@ -2339,9 +2339,9 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 	if (IS_ERR(fspath))
 		return (void *)fspath;
 
-	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
+	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
 	if (!ifp) {
-		vfree(fspath);
+		kvfree(fspath);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2356,6 +2356,6 @@ void free_ipath(struct inode_fs_paths *ipath)
 {
 	if (!ipath)
 		return;
-	vfree(ipath->fspath);
+	kvfree(ipath->fspath);
 	kfree(ipath);
 }
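The conversions above follow the kvmalloc contract: the returned buffer may
come from either the slab allocator or vmalloc, so every free site has to be
switched to kvfree() in the same patch. A minimal sketch of the pairing rule
(names are illustrative, not btrfs code):

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct example_container {
    	char *data;
    };

    static struct example_container *example_init(size_t total_bytes)
    {
    	struct example_container *c;

    	c = kmalloc(sizeof(*c), GFP_KERNEL);
    	if (!c)
    		return NULL;
    	/* may be slab or vmalloc memory depending on size and fragmentation */
    	c->data = kvmalloc(total_bytes, GFP_KERNEL);
    	if (!c->data) {
    		kfree(c);
    		return NULL;
    	}
    	return c;
    }

    static void example_free(struct example_container *c)
    {
    	if (!c)
    		return;
    	kvfree(c->data);	/* not vfree() or kfree() */
    	kfree(c);		/* plain kmalloc'd memory, kfree is fine */
    }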
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
@@ -94,7 +94,7 @@
 #include <linux/mutex.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/string.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -1638,12 +1638,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		struct bio *bio;
 		unsigned int j;
 
-		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
-		if (!bio) {
-			pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
-			       num_pages - i);
-			return -1;
-		}
+		bio = btrfs_io_bio_alloc(num_pages - i);
 		bio->bi_bdev = block_ctx->dev->bdev;
 		bio->bi_iter.bi_sector = dev_bytenr >> 9;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
@@ -1668,14 +1663,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		dev_bytenr += (j - i) * PAGE_SIZE;
 		i = j;
 	}
-	for (i = 0; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++)
 		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
-		if (!block_ctx->datav[i]) {
-			pr_info("btrfsic: kmap() failed (dev %s)!\n",
-			       block_ctx->dev->name);
-			return -1;
-		}
-	}
 
 	return block_ctx->len;
 }
@@ -2822,44 +2811,47 @@ static void __btrfsic_submit_bio(struct bio *bio)
 	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
 	if (NULL != dev_state &&
 	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
-		unsigned int i;
+		unsigned int i = 0;
 		u64 dev_bytenr;
 		u64 cur_bytenr;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
+		struct bvec_iter iter;
 		int bio_is_patched;
 		char **mapped_datav;
+		unsigned int segs = bio_segments(bio);
 
 		dev_bytenr = 512 * bio->bi_iter.bi_sector;
 		bio_is_patched = 0;
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-			       bio_op(bio), bio->bi_opf, bio->bi_vcnt,
+			       bio_op(bio), bio->bi_opf, segs,
 			       (unsigned long long)bio->bi_iter.bi_sector,
 			       dev_bytenr, bio->bi_bdev);
 
-		mapped_datav = kmalloc_array(bio->bi_vcnt,
+		mapped_datav = kmalloc_array(segs,
 					     sizeof(*mapped_datav), GFP_NOFS);
 		if (!mapped_datav)
 			goto leave;
 		cur_bytenr = dev_bytenr;
 
-		bio_for_each_segment_all(bvec, bio, i) {
-			BUG_ON(bvec->bv_len != PAGE_SIZE);
-			mapped_datav[i] = kmap(bvec->bv_page);
+		bio_for_each_segment(bvec, bio, iter) {
+			BUG_ON(bvec.bv_len != PAGE_SIZE);
+			mapped_datav[i] = kmap(bvec.bv_page);
+			i++;
 
 			if (dev_state->state->print_mask &
 			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
 				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
-				       i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
-			cur_bytenr += bvec->bv_len;
+				       i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
+			cur_bytenr += bvec.bv_len;
 		}
 		btrfsic_process_written_block(dev_state, dev_bytenr,
-					      mapped_datav, bio->bi_vcnt,
+					      mapped_datav, segs,
 					      bio, &bio_is_patched,
 					      NULL, bio->bi_opf);
-		bio_for_each_segment_all(bvec, bio, i)
-			kunmap(bvec->bv_page);
+		bio_for_each_segment(bvec, bio, iter)
+			kunmap(bvec.bv_page);
 		kfree(mapped_datav);
 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
@@ -2923,13 +2915,10 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
 		       fs_info->sectorsize, PAGE_SIZE);
 		return -1;
 	}
-	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	state = kvzalloc(sizeof(*state), GFP_KERNEL);
 	if (!state) {
-		state = vzalloc(sizeof(*state));
-		if (!state) {
-			pr_info("btrfs check-integrity: vzalloc() failed!\n");
-			return -1;
-		}
+		pr_info("btrfs check-integrity: allocation failed!\n");
+		return -1;
 	}
 
 	if (!btrfsic_is_initialized) {
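The __btrfsic_submit_bio() hunk above swaps bio_for_each_segment_all(), which
walks bi_io_vec directly and is only valid for a bio the caller fully owns,
for bio_for_each_segment(), which iterates through a private bvec_iter copy
and therefore also works on cloned bios. A minimal sketch of the iterator
shape, independent of btrfs:

    #include <linux/bio.h>
    #include <linux/highmem.h>

    static void example_walk_bio(struct bio *bio)
    {
    	struct bio_vec bvec;	/* filled in by value per segment */
    	struct bvec_iter iter;	/* private cursor; the bio is untouched */

    	bio_for_each_segment(bvec, bio, iter) {
    		void *addr = kmap(bvec.bv_page);
    		/* inspect bvec.bv_len bytes at addr + bvec.bv_offset */
    		kunmap(bvec.bv_page);
    	}
    }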
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
@@ -32,6 +32,7 @@
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
 #include <linux/slab.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -42,48 +43,7 @@
 #include "extent_io.h"
 #include "extent_map.h"
 
-struct compressed_bio {
-	/* number of bios pending for this compressed extent */
-	refcount_t pending_bios;
-
-	/* the pages with the compressed data on them */
-	struct page **compressed_pages;
-
-	/* inode that owns this data */
-	struct inode *inode;
-
-	/* starting offset in the inode for our pages */
-	u64 start;
-
-	/* number of bytes in the inode we're working on */
-	unsigned long len;
-
-	/* number of bytes on disk */
-	unsigned long compressed_len;
-
-	/* the compression algorithm for this bio */
-	int compress_type;
-
-	/* number of compressed pages in the array */
-	unsigned long nr_pages;
-
-	/* IO errors */
-	int errors;
-	int mirror_num;
-
-	/* for reads, this is the bio we are copying the data into */
-	struct bio *orig_bio;
-
-	/*
-	 * the start of a variable length array of checksums only
-	 * used by reads
-	 */
-	u32 sums;
-};
-
-static int btrfs_decompress_bio(int type, struct page **pages_in,
-				u64 disk_start, struct bio *orig_bio,
-				size_t srclen);
+static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 				      unsigned long disk_size)
@@ -94,12 +54,6 @@ static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 }
 
-static struct bio *compressed_bio_alloc(struct block_device *bdev,
-					u64 first_byte, gfp_t gfp_flags)
-{
-	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
-}
-
 static int check_compressed_csum(struct btrfs_inode *inode,
 				 struct compressed_bio *cb,
 				 u64 disk_start)
@@ -173,11 +127,8 @@ static void end_compressed_bio_read(struct bio *bio)
 	/* ok, we're the last bio for this extent, lets start
 	 * the decompression.
 	 */
-	ret = btrfs_decompress_bio(cb->compress_type,
-				   cb->compressed_pages,
-				   cb->start,
-				   cb->orig_bio,
-				   cb->compressed_len);
+	ret = btrfs_decompress_bio(cb);
 csum_failed:
 	if (ret)
 		cb->errors = 1;
@@ -355,11 +306,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 	bdev = fs_info->fs_devices->latest_bdev;
 
-	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
-	if (!bio) {
-		kfree(cb);
-		return BLK_STS_RESOURCE;
-	}
+	bio = btrfs_bio_alloc(bdev, first_byte);
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	bio->bi_private = cb;
 	bio->bi_end_io = end_compressed_bio_write;
@@ -406,8 +353,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 			bio_put(bio);
 
-			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
-			BUG_ON(!bio);
+			bio = btrfs_bio_alloc(bdev, first_byte);
 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 			bio->bi_private = cb;
 			bio->bi_end_io = end_compressed_bio_write;
@@ -650,9 +596,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	/* include any pages we added in add_ra-bio_pages */
 	cb->len = bio->bi_iter.bi_size;
 
-	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
-	if (!comp_bio)
-		goto fail2;
+	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
@@ -703,9 +647,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 			bio_put(comp_bio);
 
-			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
-							GFP_NOFS);
-			BUG_ON(!comp_bio);
+			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;
@@ -801,6 +743,7 @@ static struct list_head *find_workspace(int type)
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
 	int idx = type - 1;
+	unsigned nofs_flag;
 
 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
@@ -830,7 +773,15 @@ again:
 			atomic_inc(total_ws);
 			spin_unlock(ws_lock);
 
+			/*
+			 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
+			 * to turn it off here because we might get called from the restricted
+			 * context of btrfs_compress_bio/btrfs_compress_pages
+			 */
+			nofs_flag = memalloc_nofs_save();
 			workspace = btrfs_compress_op[idx]->alloc_workspace();
+			memalloc_nofs_restore(nofs_flag);
+
 			if (IS_ERR(workspace)) {
 				atomic_dec(total_ws);
 				wake_up(ws_wait);
@@ -961,19 +912,16 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
  * be contiguous. They all correspond to the range of bytes covered by
  * the compressed extent.
  */
-static int btrfs_decompress_bio(int type, struct page **pages_in,
-				u64 disk_start, struct bio *orig_bio,
-				size_t srclen)
+static int btrfs_decompress_bio(struct compressed_bio *cb)
 {
 	struct list_head *workspace;
 	int ret;
+	int type = cb->compress_type;
 
 	workspace = find_workspace(type);
-	ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
-							 disk_start, orig_bio,
-							 srclen);
+	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
 	free_workspace(type, workspace);
 
 	return ret;
 }
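The dropped !bio error paths above correspond to the "no allocation failures"
item in the summary: a bio allocated from a bioset is backed by a mempool,
and with a blocking GFP mask bio_alloc_bioset() is guaranteed to make forward
progress, so the NULL checks become dead code. A hedged sketch of that
guarantee, with an illustrative bioset name rather than the btrfs one:

    #include <linux/bio.h>

    static struct bio_set *example_bioset;	/* assumed created at init time */

    static struct bio *example_bio_alloc(unsigned int nr_vecs)
    {
    	/*
    	 * Cannot return NULL: GFP_NOFS may block, and the mempool
    	 * behind the bioset guarantees an eventual allocation.
    	 */
    	return bio_alloc_bioset(GFP_NOFS, nr_vecs, example_bioset);
    }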
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
@@ -34,6 +34,45 @@
 /* Maximum size of data before compression */
 #define BTRFS_MAX_UNCOMPRESSED		(SZ_128K)
 
+struct compressed_bio {
+	/* number of bios pending for this compressed extent */
+	refcount_t pending_bios;
+
+	/* the pages with the compressed data on them */
+	struct page **compressed_pages;
+
+	/* inode that owns this data */
+	struct inode *inode;
+
+	/* starting offset in the inode for our pages */
+	u64 start;
+
+	/* number of bytes in the inode we're working on */
+	unsigned long len;
+
+	/* number of bytes on disk */
+	unsigned long compressed_len;
+
+	/* the compression algorithm for this bio */
+	int compress_type;
+
+	/* number of compressed pages in the array */
+	unsigned long nr_pages;
+
+	/* IO errors */
+	int errors;
+	int mirror_num;
+
+	/* for reads, this is the bio we are copying the data into */
+	struct bio *orig_bio;
+
+	/*
+	 * the start of a variable length array of checksums only
+	 * used by reads
+	 */
+	u32 sums;
+};
+
 void btrfs_init_compress(void);
 void btrfs_exit_compress(void);
 
@@ -78,10 +117,7 @@ struct btrfs_compress_op {
 			      unsigned long *total_out);
 
 	int (*decompress_bio)(struct list_head *workspace,
-			      struct page **pages_in,
-			      u64 disk_start,
-			      struct bio *orig_bio,
-			      size_t srclen);
+			      struct compressed_bio *cb);
 
 	int (*decompress)(struct list_head *workspace,
 			  unsigned char *data_in,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
@@ -19,7 +19,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -3667,14 +3667,14 @@ static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
 	/* make room in the right data area */
 	data_end = leaf_data_end(fs_info, right);
 	memmove_extent_buffer(right,
-			      btrfs_leaf_data(right) + data_end - push_space,
-			      btrfs_leaf_data(right) + data_end,
+			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
+			      BTRFS_LEAF_DATA_OFFSET + data_end,
 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
 
 	/* copy from the left data area */
-	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
+	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
-		     btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
+		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
 		     push_space);
 
 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
@@ -3888,9 +3888,9 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
 		     btrfs_item_offset_nr(right, push_items - 1);
 
-	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
+	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
 		     leaf_data_end(fs_info, left) - push_space,
-		     btrfs_leaf_data(right) +
+		     BTRFS_LEAF_DATA_OFFSET +
 		     btrfs_item_offset_nr(right, push_items - 1),
 		     push_space);
 	old_left_nritems = btrfs_header_nritems(left);
@@ -3917,9 +3917,9 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
 	if (push_items < right_nritems) {
 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
 						  leaf_data_end(fs_info, right);
-		memmove_extent_buffer(right, btrfs_leaf_data(right) +
+		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
-				      btrfs_leaf_data(right) +
+				      BTRFS_LEAF_DATA_OFFSET +
 				      leaf_data_end(fs_info, right), push_space);
 
 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
@@ -4069,8 +4069,8 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
 		   nritems * sizeof(struct btrfs_item));
 
 	copy_extent_buffer(right, l,
-		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
-		     data_copy_size, btrfs_leaf_data(l) +
+		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
+		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
 		     leaf_data_end(fs_info, l), data_copy_size);
 
 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
@@ -4607,8 +4607,8 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
 
 	/* shift the data */
 	if (from_end) {
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-			      data_end + size_diff, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
 			      data_end, old_data_start + new_size - data_end);
 	} else {
 		struct btrfs_disk_key disk_key;
@@ -4634,8 +4634,8 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
 		}
 	}
 
-	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-		      data_end + size_diff, btrfs_leaf_data(leaf) +
+	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+		      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
 		      data_end, old_data_start - data_end);
 
 	offset = btrfs_disk_key_offset(&disk_key);
@@ -4707,8 +4707,8 @@ void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	}
 
 	/* shift the data */
-	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-		      data_end - data_size, btrfs_leaf_data(leaf) +
+	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
 		      data_end, old_data - data_end);
 
 	data_end = old_data;
@@ -4790,8 +4790,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 			      (nritems - slot) * sizeof(struct btrfs_item));
 
 		/* shift the data */
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-			      data_end - total_data, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
 			      data_end, old_data - data_end);
 		data_end = old_data;
 	}
@@ -4983,9 +4983,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	if (slot + nr != nritems) {
 		int data_end = leaf_data_end(fs_info, leaf);
 
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
 			      data_end + dsize,
-			      btrfs_leaf_data(leaf) + data_end,
+			      BTRFS_LEAF_DATA_OFFSET + data_end,
 			      last_off - data_end);
 
 		for (i = slot + nr; i < nritems; i++) {
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
@@ -48,7 +48,6 @@ struct btrfs_trans_handle;
 struct btrfs_transaction;
 struct btrfs_pending_snapshot;
 extern struct kmem_cache *btrfs_trans_handle_cachep;
-extern struct kmem_cache *btrfs_transaction_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
 extern struct kmem_cache *btrfs_free_space_cachep;
@@ -716,6 +715,10 @@ struct btrfs_delayed_root;
 #define BTRFS_FS_BTREE_ERR			11
 #define BTRFS_FS_LOG1_ERR			12
 #define BTRFS_FS_LOG2_ERR			13
+#define BTRFS_FS_QUOTA_OVERRIDE			14
+/* Used to record internally whether fs has been frozen */
+#define BTRFS_FS_FROZEN				15
+
 /*
  * Indicate that a whole-filesystem exclusive operation is running
  * (device replace, resize, device add/delete, balance)
@@ -748,8 +751,7 @@ struct btrfs_fs_info {
 	struct rb_root block_group_cache_tree;
 
 	/* keep track of unallocated space */
-	spinlock_t free_chunk_lock;
-	u64 free_chunk_space;
+	atomic64_t free_chunk_space;
 
 	struct extent_io_tree freed_extents[2];
 	struct extent_io_tree *pinned_extents;
@@ -797,17 +799,7 @@ struct btrfs_fs_info {
 	 * so it is also safe.
 	 */
 	u64 max_inline;
-	/*
-	 * Protected by ->chunk_mutex and sb->s_umount.
-	 *
-	 * The reason that we use two lock to protect it is because only
-	 * remount and mount operations can change it and these two operations
-	 * are under sb->s_umount, but the read side (chunk allocation) can not
-	 * acquire sb->s_umount or the deadlock would happen. So we use two
-	 * locks to protect it. On the write side, we must acquire two locks,
-	 * and on the read side, we just need acquire one of them.
-	 */
-	u64 alloc_start;
+
 	struct btrfs_transaction *running_transaction;
 	wait_queue_head_t transaction_throttle;
 	wait_queue_head_t transaction_wait;
@@ -1107,9 +1099,6 @@ struct btrfs_fs_info {
 	 */
 	struct list_head pinned_chunks;
 
-	/* Used to record internally whether fs has been frozen */
-	int fs_frozen;
-
 	/* Cached block sizes */
 	u32 nodesize;
 	u32 sectorsize;
@@ -1277,21 +1266,20 @@ struct btrfs_root {
 	/* For qgroup metadata space reserve */
 	atomic64_t qgroup_meta_rsv;
 };
 
 static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
 {
 	return btrfs_sb(inode->i_sb)->sectorsize;
 }
 
-static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
-{
-	return blocksize - sizeof(struct btrfs_header);
-}
-
 static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
 {
-	return __BTRFS_LEAF_DATA_SIZE(info->nodesize);
+	return info->nodesize - sizeof(struct btrfs_header);
 }
 
+#define BTRFS_LEAF_DATA_OFFSET		offsetof(struct btrfs_leaf, items)
+
 static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
 {
 	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
@@ -1553,8 +1541,27 @@ static inline void btrfs_set_##name(type *s, u##bits val) \
 	s->member = cpu_to_le##bits(val);			\
 }
 
+
+static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb,
+					   struct btrfs_dev_item *s)
+{
+	BUILD_BUG_ON(sizeof(u64) !=
+		     sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+	return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
+					    total_bytes));
+}
+static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb,
+						struct btrfs_dev_item *s,
+						u64 val)
+{
+	BUILD_BUG_ON(sizeof(u64) !=
+		     sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+	WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
+	btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
+}
+
+
 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
-BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64);
 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
@@ -2324,10 +2331,6 @@ static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
 	return btrfs_csum_sizes[t];
 }
 
-static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
-{
-	return offsetof(struct btrfs_leaf, items);
-}
-
 /*
  * The leaf data grows from end-to-front in the node.
@@ -2538,11 +2541,11 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
 
 /* helper function to cast into the data area of the leaf. */
 #define btrfs_item_ptr(leaf, slot, type) \
-	((type *)(btrfs_leaf_data(leaf) + \
+	((type *)(BTRFS_LEAF_DATA_OFFSET + \
 	btrfs_item_offset_nr(leaf, slot)))
 
 #define btrfs_item_ptr_offset(leaf, slot) \
-	((unsigned long)(btrfs_leaf_data(leaf) + \
+	((unsigned long)(BTRFS_LEAF_DATA_OFFSET + \
 	btrfs_item_offset_nr(leaf, slot)))
 
 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
@@ -2680,7 +2683,9 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_fs_info *fs_info);
-u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
+u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info);
+u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info);
+u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
 enum btrfs_reserve_flush_enum {
@@ -2703,9 +2708,13 @@ enum btrfs_flush_state {
 	COMMIT_TRANS		=	6,
 };
 
-int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
 int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
-void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
+int btrfs_check_data_free_space(struct inode *inode,
+			struct extent_changeset **reserved, u64 start, u64 len);
+void btrfs_free_reserved_data_space(struct inode *inode,
+			struct extent_changeset *reserved, u64 start, u64 len);
+void btrfs_delalloc_release_space(struct inode *inode,
+			struct extent_changeset *reserved, u64 start, u64 len);
 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
 					    u64 len);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
@@ -2722,8 +2731,8 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
 				      struct btrfs_block_rsv *rsv);
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
 void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes);
-int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
-void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
+int btrfs_delalloc_reserve_space(struct inode *inode,
+			struct extent_changeset **reserved, u64 start, u64 len);
 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
 					      unsigned short type);
@@ -3031,12 +3040,14 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 					     const char *name, u16 name_len,
 					     int mod);
 int verify_dir_item(struct btrfs_fs_info *fs_info,
-		    struct extent_buffer *leaf,
+		    struct extent_buffer *leaf, int slot,
 		    struct btrfs_dir_item *dir_item);
 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 						 struct btrfs_path *path,
 						 const char *name,
 						 int name_len);
+bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
+			     unsigned long start, u16 name_len);
 
 /* orphan.c */
 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -3171,6 +3182,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 			 size_t size, struct bio *bio,
 			 unsigned long bio_flags);
+void btrfs_set_range_writeback(void *private_data, u64 start, u64 end);
 int btrfs_page_mkwrite(struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
@@ -470,7 +470,8 @@ add_tail:
 static noinline void
 update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 			 struct btrfs_delayed_ref_node *existing,
-			 struct btrfs_delayed_ref_node *update)
+			 struct btrfs_delayed_ref_node *update,
+			 int *old_ref_mod_ret)
 {
 	struct btrfs_delayed_ref_head *existing_ref;
 	struct btrfs_delayed_ref_head *ref;
@@ -523,6 +524,8 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 	 * currently, for refs we just added we know we're a-ok.
 	 */
 	old_ref_mod = existing_ref->total_ref_mod;
+	if (old_ref_mod_ret)
+		*old_ref_mod_ret = old_ref_mod;
 	existing->ref_mod += update->ref_mod;
 	existing_ref->total_ref_mod += update->ref_mod;
 
@@ -550,7 +553,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		     struct btrfs_delayed_ref_node *ref,
 		     struct btrfs_qgroup_extent_record *qrecord,
 		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
-		     int action, int is_data, int *qrecord_inserted_ret)
+		     int action, int is_data, int *qrecord_inserted_ret,
+		     int *old_ref_mod, int *new_ref_mod)
 {
 	struct btrfs_delayed_ref_head *existing;
 	struct btrfs_delayed_ref_head *head_ref = NULL;
@@ -638,7 +642,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	if (existing) {
 		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
 			&& existing->qgroup_reserved);
-		update_existing_head_ref(delayed_refs, &existing->node, ref);
+		update_existing_head_ref(delayed_refs, &existing->node, ref,
+					 old_ref_mod);
 		/*
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
@@ -646,6 +651,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 		head_ref = existing;
 	} else {
+		if (old_ref_mod)
+			*old_ref_mod = 0;
 		if (is_data && count_mod < 0)
 			delayed_refs->pending_csums += num_bytes;
 		delayed_refs->num_heads++;
@@ -655,6 +662,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	}
 	if (qrecord_inserted_ret)
 		*qrecord_inserted_ret = qrecord_inserted;
+	if (new_ref_mod)
+		*new_ref_mod = head_ref->total_ref_mod;
 	return head_ref;
 }
 
@@ -778,7 +787,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 			       struct btrfs_trans_handle *trans,
 			       u64 bytenr, u64 num_bytes, u64 parent,
 			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op)
+			       struct btrfs_delayed_extent_op *extent_op,
+			       int *old_ref_mod, int *new_ref_mod)
 {
 	struct btrfs_delayed_tree_ref *ref;
 	struct btrfs_delayed_ref_head *head_ref;
@@ -813,7 +823,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	 */
 	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
 					bytenr, num_bytes, 0, 0, action, 0,
-					&qrecord_inserted);
+					&qrecord_inserted, old_ref_mod,
+					new_ref_mod);
 
 	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
 			     num_bytes, parent, ref_root, level, action);
@@ -838,7 +849,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 			       struct btrfs_trans_handle *trans,
 			       u64 bytenr, u64 num_bytes,
 			       u64 parent, u64 ref_root,
-			       u64 owner, u64 offset, u64 reserved, int action)
+			       u64 owner, u64 offset, u64 reserved, int action,
+			       int *old_ref_mod, int *new_ref_mod)
 {
 	struct btrfs_delayed_data_ref *ref;
 	struct btrfs_delayed_ref_head *head_ref;
@@ -878,7 +890,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	 */
 	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
 					bytenr, num_bytes, ref_root, reserved,
-					action, 1, &qrecord_inserted);
+					action, 1, &qrecord_inserted,
+					old_ref_mod, new_ref_mod);
 
 	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
 			     num_bytes, parent, ref_root, owner, offset,
@@ -909,7 +922,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 
 	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
 			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-			     extent_op->is_data, NULL);
+			     extent_op->is_data, NULL, NULL, NULL);
 
 	spin_unlock(&delayed_refs->lock);
 	return 0;
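The old_ref_mod/new_ref_mod out-parameters let a caller observe how the
head's total_ref_mod changed across the insertion, which is what the "rework
delayed ref total_bytes_pinned accounting" patches key off: a sign transition
tells the caller whether the extent just became, or stopped being, scheduled
for freeing. A hedged sketch of the caller-side pattern (simplified and
hypothetical, not the exact extent-tree.c code; add_pinned_bytes() stands in
for the file-local helper there):

    static int example_drop_tree_block(struct btrfs_fs_info *fs_info,
    				   struct btrfs_trans_handle *trans,
    				   u64 bytenr, u64 num_bytes, u64 parent,
    				   u64 root_objectid, int level)
    {
    	int old_ref_mod, new_ref_mod;
    	int ret;

    	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
    					 parent, root_objectid, level,
    					 BTRFS_DROP_DELAYED_REF, NULL,
    					 &old_ref_mod, &new_ref_mod);
    	/*
    	 * total_ref_mod crossed from >= 0 to < 0: the extent is now
    	 * expected to be freed, so account its bytes as pinned.
    	 */
    	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
    		add_pinned_bytes(fs_info, num_bytes, level, root_objectid);
    	return ret;
    }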
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
@@ -247,12 +247,14 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 			       struct btrfs_trans_handle *trans,
 			       u64 bytenr, u64 num_bytes, u64 parent,
 			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op);
+			       struct btrfs_delayed_extent_op *extent_op,
+			       int *old_ref_mod, int *new_ref_mod);
 int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 			       struct btrfs_trans_handle *trans,
 			       u64 bytenr, u64 num_bytes,
 			       u64 parent, u64 ref_root,
-			       u64 owner, u64 offset, u64 reserved, int action);
+			       u64 owner, u64 offset, u64 reserved, int action,
+			       int *old_ref_mod, int *new_ref_mod);
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 				struct btrfs_trans_handle *trans,
 				u64 bytenr, u64 num_bytes,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
@@ -388,7 +388,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 	if (ret)
 		btrfs_err(fs_info, "kobj add dev failed %d", ret);
 
-	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 
 	/* force writing the updated state information to disk */
 	trans = btrfs_start_transaction(root, 0);
@@ -507,7 +507,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
 	}
-	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
@@ -395,8 +395,6 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 
 	leaf = path->nodes[0];
 	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
-	if (verify_dir_item(fs_info, leaf, dir_item))
-		return NULL;
 
 	total_len = btrfs_item_size_nr(leaf, path->slots[0]);
 	while (cur < total_len) {
@@ -405,6 +403,8 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 			btrfs_dir_data_len(leaf, dir_item);
 		name_ptr = (unsigned long)(dir_item + 1);
 
+		if (verify_dir_item(fs_info, leaf, path->slots[0], dir_item))
+			return NULL;
 		if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
 		    memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
 			return dir_item;
@@ -453,9 +453,11 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
 
 int verify_dir_item(struct btrfs_fs_info *fs_info,
 		    struct extent_buffer *leaf,
+		    int slot,
 		    struct btrfs_dir_item *dir_item)
 {
 	u16 namelen = BTRFS_NAME_LEN;
+	int ret;
 	u8 type = btrfs_dir_type(leaf, dir_item);
 
 	if (type >= BTRFS_FT_MAX) {
@@ -472,6 +474,12 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
 		return 1;
 	}
 
+	namelen = btrfs_dir_name_len(leaf, dir_item);
+	ret = btrfs_is_name_len_valid(leaf, slot,
+				      (unsigned long)(dir_item + 1), namelen);
+	if (!ret)
+		return 1;
+
 	/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
 	if ((btrfs_dir_data_len(leaf, dir_item) +
 	     btrfs_dir_name_len(leaf, dir_item)) >
@@ -484,3 +492,67 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
 
 	return 0;
 }
+
+bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
+			     unsigned long start, u16 name_len)
+{
+	struct btrfs_fs_info *fs_info = leaf->fs_info;
+	struct btrfs_key key;
+	u32 read_start;
+	u32 read_end;
+	u32 item_start;
+	u32 item_end;
+	u32 size;
+	bool ret = true;
+
+	ASSERT(start > BTRFS_LEAF_DATA_OFFSET);
+
+	read_start = start - BTRFS_LEAF_DATA_OFFSET;
+	read_end = read_start + name_len;
+	item_start = btrfs_item_offset_nr(leaf, slot);
+	item_end = btrfs_item_end_nr(leaf, slot);
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+
+	switch (key.type) {
+	case BTRFS_DIR_ITEM_KEY:
+	case BTRFS_XATTR_ITEM_KEY:
+	case BTRFS_DIR_INDEX_KEY:
+		size = sizeof(struct btrfs_dir_item);
+		break;
+	case BTRFS_INODE_REF_KEY:
+		size = sizeof(struct btrfs_inode_ref);
+		break;
+	case BTRFS_INODE_EXTREF_KEY:
+		size = sizeof(struct btrfs_inode_extref);
+		break;
+	case BTRFS_ROOT_REF_KEY:
+	case BTRFS_ROOT_BACKREF_KEY:
+		size = sizeof(struct btrfs_root_ref);
+		break;
+	default:
+		ret = false;
+		goto out;
+	}
+
+	if (read_start < item_start) {
+		ret = false;
+		goto out;
+	}
+	if (read_end > item_end) {
+		ret = false;
+		goto out;
+	}
+
+	/* there shall be item(s) before name */
+	if (read_start - item_start < size) {
+		ret = false;
+		goto out;
+	}
+
+out:
+	if (!ret)
+		btrfs_crit(fs_info, "invalid dir item name len: %u",
+			   (unsigned int)name_len);
+	return ret;
+}
@@ -89,7 +89,6 @@ struct btrfs_end_io_wq {
 	struct btrfs_fs_info *info;
 	blk_status_t status;
 	enum btrfs_wq_endio_type metadata;
-	struct list_head list;
 	struct btrfs_work work;
 };
 
@@ -118,9 +117,9 @@ void btrfs_end_io_wq_exit(void)
  * just before they are sent down the IO stack.
  */
 struct async_submit_bio {
-	struct inode *inode;
+	void *private_data;
+	struct btrfs_fs_info *fs_info;
 	struct bio *bio;
-	struct list_head list;
 	extent_submit_bio_hook_t *submit_bio_start;
 	extent_submit_bio_hook_t *submit_bio_done;
 	int mirror_num;
@@ -871,7 +870,7 @@ static void run_one_async_start(struct btrfs_work *work)
 	blk_status_t ret;
 
 	async = container_of(work, struct async_submit_bio, work);
-	ret = async->submit_bio_start(async->inode, async->bio,
+	ret = async->submit_bio_start(async->private_data, async->bio,
 				      async->mirror_num, async->bio_flags,
 				      async->bio_offset);
 	if (ret)
@@ -885,7 +884,7 @@ static void run_one_async_done(struct btrfs_work *work)
 	int limit;
 
 	async = container_of(work, struct async_submit_bio, work);
-	fs_info = BTRFS_I(async->inode)->root->fs_info;
+	fs_info = async->fs_info;
 
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
@@ -904,7 +903,7 @@ static void run_one_async_done(struct btrfs_work *work)
 		return;
 	}
 
-	async->submit_bio_done(async->inode, async->bio, async->mirror_num,
+	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
 			       async->bio_flags, async->bio_offset);
 }
 
@@ -916,11 +915,11 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
-			struct inode *inode, struct bio *bio, int mirror_num,
-			unsigned long bio_flags, u64 bio_offset,
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			int mirror_num, unsigned long bio_flags,
+			u64 bio_offset, void *private_data,
 			extent_submit_bio_hook_t *submit_bio_start,
 			extent_submit_bio_hook_t *submit_bio_done)
 {
 	struct async_submit_bio *async;
 
@@ -928,7 +927,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
 	if (!async)
 		return BLK_STS_RESOURCE;
 
-	async->inode = inode;
+	async->private_data = private_data;
+	async->fs_info = fs_info;
 	async->bio = bio;
 	async->mirror_num = mirror_num;
 	async->submit_bio_start = submit_bio_start;
@@ -974,9 +974,9 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
 	return errno_to_blk_status(ret);
 }
 
-static blk_status_t __btree_submit_bio_start(struct inode *inode,
-		struct bio *bio, int mirror_num, unsigned long bio_flags,
+static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
+		int mirror_num, unsigned long bio_flags,
 		u64 bio_offset)
 {
 	/*
 	 * when we're called for a write, we're already in the async
@@ -985,10 +985,11 @@ static blk_status_t __btree_submit_bio_start(struct inode *inode,
 	return btree_csum_one_bio(bio);
 }
 
-static blk_status_t __btree_submit_bio_done(struct inode *inode,
-		struct bio *bio, int mirror_num, unsigned long bio_flags,
+static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
+		int mirror_num, unsigned long bio_flags,
 		u64 bio_offset)
 {
+	struct inode *inode = private_data;
 	blk_status_t ret;
 
 	/*
@@ -1014,10 +1015,11 @@ static int check_async_write(unsigned long bio_flags)
 	return 1;
 }
 
-static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
 					  int mirror_num, unsigned long bio_flags,
 					  u64 bio_offset)
 {
+	struct inode *inode = private_data;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int async = check_async_write(bio_flags);
 	blk_status_t ret;
@@ -1042,8 +1044,8 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 		 * kthread helpers are used to submit writes so that
 		 * checksumming can happen in parallel across all CPUs
 		 */
-		ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
-					  bio_offset,
+		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
+					  bio_offset, private_data,
 					  __btree_submit_bio_start,
 					  __btree_submit_bio_done);
 	}
@@ -1221,10 +1223,10 @@ int btrfs_write_tree_block(struct extent_buffer *buf)
 					buf->start + buf->len - 1);
 }
 
-int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
+void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-	return filemap_fdatawait_range(buf->pages[0]->mapping,
-				       buf->start, buf->start + buf->len - 1);
+	filemap_fdatawait_range(buf->pages[0]->mapping,
+				buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
@@ -1346,8 +1348,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	root->log_transid_committed = -1;
 	root->last_log_commit = 0;
 	if (!dummy)
-		extent_io_tree_init(&root->dirty_log_pages,
-				     fs_info->btree_inode->i_mapping);
+		extent_io_tree_init(&root->dirty_log_pages, NULL);
 
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
@@ -2308,7 +2309,7 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 	inode->i_mapping->a_ops = &btree_aops;
 
 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
-	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
 	BTRFS_I(inode)->io_tree.track_uptodate = 0;
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
@@ -2625,7 +2626,6 @@ int open_ctree(struct super_block *sb,
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
 	spin_lock_init(&fs_info->delayed_iput_lock);
 	spin_lock_init(&fs_info->defrag_inodes_lock);
-	spin_lock_init(&fs_info->free_chunk_lock);
 	spin_lock_init(&fs_info->tree_mod_seq_lock);
 	spin_lock_init(&fs_info->super_lock);
 	spin_lock_init(&fs_info->qgroup_op_lock);
@@ -2661,12 +2661,11 @@ int open_ctree(struct super_block *sb,
 	atomic_set(&fs_info->qgroup_op_seq, 0);
 	atomic_set(&fs_info->reada_works_cnt, 0);
 	atomic64_set(&fs_info->tree_mod_seq, 0);
-	fs_info->fs_frozen = 0;
 	fs_info->sb = sb;
 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
 	fs_info->metadata_ratio = 0;
 	fs_info->defrag_inodes = RB_ROOT;
-	fs_info->free_chunk_space = 0;
+	atomic64_set(&fs_info->free_chunk_space, 0);
 	fs_info->tree_mod_log = RB_ROOT;
 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
@@ -2703,10 +2702,8 @@ int open_ctree(struct super_block *sb,
 	fs_info->block_group_cache_tree = RB_ROOT;
 	fs_info->first_logical_byte = (u64)-1;
 
-	extent_io_tree_init(&fs_info->freed_extents[0],
-			     fs_info->btree_inode->i_mapping);
-	extent_io_tree_init(&fs_info->freed_extents[1],
-			     fs_info->btree_inode->i_mapping);
+	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
+	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
 	fs_info->pinned_extents = &fs_info->freed_extents[0];
 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
 
@@ -3484,65 +3481,61 @@ static int write_dev_supers(struct btrfs_device *device,
  */
 static void btrfs_end_empty_barrier(struct bio *bio)
 {
-	if (bio->bi_private)
-		complete(bio->bi_private);
-	bio_put(bio);
+	complete(bio->bi_private);
 }
 
 /*
- * trigger flushes for one the devices. If you pass wait == 0, the flushes are
- * sent down. With wait == 1, it waits for the previous flush.
- *
- * any device where the flush fails with eopnotsupp are flagged as not-barrier
- * capable
+ * Submit a flush request to the device if it supports it. Error handling is
+ * done in the waiting counterpart.
  */
-static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
+static void write_dev_flush(struct btrfs_device *device)
 {
 	struct request_queue *q = bdev_get_queue(device->bdev);
-	struct bio *bio;
-	blk_status_t ret = 0;
+	struct bio *bio = device->flush_bio;
 
 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-		return 0;
+		return;
 
-	if (wait) {
-		bio = device->flush_bio;
-		if (!bio)
-			return 0;
-
-		wait_for_completion(&device->flush_wait);
-
-		if (bio->bi_status) {
-			ret = bio->bi_status;
-			btrfs_dev_stat_inc_and_print(device,
-				BTRFS_DEV_STAT_FLUSH_ERRS);
-		}
-
-		/* drop the reference from the wait == 0 run */
-		bio_put(bio);
-		device->flush_bio = NULL;
-
-		return ret;
-	}
-
-	/*
-	 * one reference for us, and we leave it for the
-	 * caller
-	 */
-	device->flush_bio = NULL;
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
-	if (!bio)
-		return BLK_STS_RESOURCE;
-
+	bio_reset(bio);
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
 	init_completion(&device->flush_wait);
 	bio->bi_private = &device->flush_wait;
-	device->flush_bio = bio;
 
-	bio_get(bio);
-	btrfsic_submit_bio(bio);
+	submit_bio(bio);
+	device->flush_bio_sent = 1;
+}
+
+/*
+ * If the flush bio has been submitted by write_dev_flush, wait for it.
+ */
+static blk_status_t wait_dev_flush(struct btrfs_device *device)
+{
+	struct bio *bio = device->flush_bio;
+
+	if (!device->flush_bio_sent)
+		return 0;
+
+	device->flush_bio_sent = 0;
+	wait_for_completion_io(&device->flush_wait);
+
+	return bio->bi_status;
+}
+
+static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
+{
+	int dev_flush_error = 0;
+	struct btrfs_device *dev;
+
+	list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
+		if (!dev->bdev || dev->last_flush_error)
+			dev_flush_error++;
+	}
+
+	if (dev_flush_error >
+	    fsdevs->fs_info->num_tolerated_disk_barrier_failures)
+		return -EIO;
 
 	return 0;
 }
@@ -3555,7 +3548,6 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 {
 	struct list_head *head;
 	struct btrfs_device *dev;
-	int errors_send = 0;
 	int errors_wait = 0;
 	blk_status_t ret;
 
@@ -3564,16 +3556,13 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 	list_for_each_entry_rcu(dev, head, dev_list) {
 		if (dev->missing)
 			continue;
-		if (!dev->bdev) {
-			errors_send++;
+		if (!dev->bdev)
 			continue;
-		}
 		if (!dev->in_fs_metadata || !dev->writeable)
 			continue;
 
-		ret = write_dev_flush(dev, 0);
-		if (ret)
-			errors_send++;
+		write_dev_flush(dev);
+		dev->last_flush_error = 0;
 	}
 
 	/* wait for all the barriers */
@@ -3587,13 +3576,23 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 		if (!dev->in_fs_metadata || !dev->writeable)
 			continue;
 
-		ret = write_dev_flush(dev, 1);
-		if (ret)
+		ret = wait_dev_flush(dev);
+		if (ret) {
+			dev->last_flush_error = ret;
+			btrfs_dev_stat_inc_and_print(dev,
+					BTRFS_DEV_STAT_FLUSH_ERRS);
 			errors_wait++;
+		}
+	}
+
+	if (errors_wait) {
+		/*
+		 * At some point we need the status of all disks
+		 * to arrive at the volume status. So error checking
+		 * is being pushed to a separate loop.
+		 */
+		return check_barrier_error(info->fs_devices);
 	}
-
-	if (errors_send > info->num_tolerated_disk_barrier_failures ||
-	    errors_wait > info->num_tolerated_disk_barrier_failures)
-		return -EIO;
 	return 0;
 }
@@ -4577,11 +4576,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 
 	cur_trans->state =TRANS_STATE_COMPLETED;
 	wake_up(&cur_trans->commit_wait);
-
-	/*
-	memset(cur_trans, 0, sizeof(*cur_trans));
-	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
-	*/
 }
 
 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
@@ -4637,6 +4631,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
 	return 0;
 }
 
+static struct btrfs_fs_info *btree_fs_info(void *private_data)
+{
+	struct inode *inode = private_data;
+	return btrfs_sb(inode->i_sb);
+}
+
 static const struct extent_io_ops btree_extent_io_ops = {
 	/* mandatory callbacks */
 	.submit_bio_hook = btree_submit_bio_hook,
@@ -4644,6 +4644,8 @@ static const struct extent_io_ops btree_extent_io_ops = {
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
 	.readpage_io_failed_hook = btree_io_failed_hook,
+	.set_range_writeback = btrfs_set_range_writeback,
+	.tree_fs_info = btree_fs_info,
 
 	/* optional callbacks */
 };

@@ -120,14 +120,14 @@ u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, u8 *result);
 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			enum btrfs_wq_endio_type metadata);
-blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
-			struct inode *inode, struct bio *bio, int mirror_num,
-			unsigned long bio_flags, u64 bio_offset,
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			int mirror_num, unsigned long bio_flags,
+			u64 bio_offset, void *private_data,
 			extent_submit_bio_hook_t *submit_bio_start,
 			extent_submit_bio_hook_t *submit_bio_done);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
-int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
+void btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,

@@ -282,6 +282,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
 		name_len = btrfs_inode_ref_name_len(leaf, iref);
 	}
 
+	ret = btrfs_is_name_len_valid(leaf, path->slots[0], name_ptr, name_len);
+	if (!ret) {
+		btrfs_free_path(path);
+		return -EIO;
+	}
 	read_extent_buffer(leaf, name, name_ptr, name_len);
 	btrfs_free_path(path);
 

@ -97,10 +97,11 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
|
||||||
u64 num_bytes, int delalloc);
|
u64 num_bytes, int delalloc);
|
||||||
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
|
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
|
||||||
u64 num_bytes);
|
u64 num_bytes);
|
||||||
static int __reserve_metadata_bytes(struct btrfs_root *root,
|
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
|
||||||
struct btrfs_space_info *space_info,
|
struct btrfs_space_info *space_info,
|
||||||
u64 orig_bytes,
|
u64 orig_bytes,
|
||||||
enum btrfs_reserve_flush_enum flush);
|
enum btrfs_reserve_flush_enum flush,
|
||||||
|
bool system_chunk);
|
||||||
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
|
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
|
||||||
struct btrfs_space_info *space_info,
|
struct btrfs_space_info *space_info,
|
||||||
u64 num_bytes);
|
u64 num_bytes);
|
||||||
|
@ -766,6 +767,26 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
|
||||||
|
u64 owner, u64 root_objectid)
|
||||||
|
{
|
||||||
|
struct btrfs_space_info *space_info;
|
||||||
|
u64 flags;
|
||||||
|
|
||||||
|
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
|
||||||
|
if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
|
||||||
|
flags = BTRFS_BLOCK_GROUP_SYSTEM;
|
||||||
|
else
|
||||||
|
flags = BTRFS_BLOCK_GROUP_METADATA;
|
||||||
|
} else {
|
||||||
|
flags = BTRFS_BLOCK_GROUP_DATA;
|
||||||
|
}
|
||||||
|
|
||||||
|
space_info = __find_space_info(fs_info, flags);
|
||||||
|
ASSERT(space_info);
|
||||||
|
percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* after adding space to the filesystem, we need to clear the full flags
|
* after adding space to the filesystem, we need to clear the full flags
|
||||||
* on all the space infos.
|
* on all the space infos.
|
||||||
|
@ -2092,6 +2113,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
|
||||||
u64 bytenr, u64 num_bytes, u64 parent,
|
u64 bytenr, u64 num_bytes, u64 parent,
|
||||||
u64 root_objectid, u64 owner, u64 offset)
|
u64 root_objectid, u64 owner, u64 offset)
|
||||||
{
|
{
|
||||||
|
int old_ref_mod, new_ref_mod;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
|
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
|
||||||
|
@ -2099,15 +2121,21 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
|
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
|
||||||
ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
|
ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
|
||||||
num_bytes,
|
num_bytes, parent,
|
||||||
parent, root_objectid, (int)owner,
|
root_objectid, (int)owner,
|
||||||
BTRFS_ADD_DELAYED_REF, NULL);
|
BTRFS_ADD_DELAYED_REF, NULL,
|
||||||
|
&old_ref_mod, &new_ref_mod);
|
||||||
} else {
|
} else {
|
||||||
ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
|
ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
|
||||||
num_bytes, parent, root_objectid,
|
num_bytes, parent,
|
||||||
owner, offset, 0,
|
root_objectid, owner, offset,
|
||||||
BTRFS_ADD_DELAYED_REF);
|
0, BTRFS_ADD_DELAYED_REF,
|
||||||
|
&old_ref_mod, &new_ref_mod);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
|
||||||
|
add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2411,6 +2439,16 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
|
||||||
head = btrfs_delayed_node_to_head(node);
|
head = btrfs_delayed_node_to_head(node);
|
||||||
trace_run_delayed_ref_head(fs_info, node, head, node->action);
|
trace_run_delayed_ref_head(fs_info, node, head, node->action);
|
||||||
|
|
||||||
|
if (head->total_ref_mod < 0) {
|
||||||
|
struct btrfs_block_group_cache *cache;
|
||||||
|
|
||||||
|
cache = btrfs_lookup_block_group(fs_info, node->bytenr);
|
||||||
|
ASSERT(cache);
|
||||||
|
percpu_counter_add(&cache->space_info->total_bytes_pinned,
|
||||||
|
-node->num_bytes);
|
||||||
|
btrfs_put_block_group(cache);
|
||||||
|
}
|
||||||
|
|
||||||
if (insert_reserved) {
|
if (insert_reserved) {
|
||||||
btrfs_pin_extent(fs_info, node->bytenr,
|
btrfs_pin_extent(fs_info, node->bytenr,
|
||||||
node->num_bytes, 1);
|
node->num_bytes, 1);
|
||||||
|
@ -3364,6 +3402,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
|
||||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||||
struct btrfs_root *root = fs_info->tree_root;
|
struct btrfs_root *root = fs_info->tree_root;
|
||||||
struct inode *inode = NULL;
|
struct inode *inode = NULL;
|
||||||
|
struct extent_changeset *data_reserved = NULL;
|
||||||
u64 alloc_hint = 0;
|
u64 alloc_hint = 0;
|
||||||
int dcs = BTRFS_DC_ERROR;
|
int dcs = BTRFS_DC_ERROR;
|
||||||
u64 num_pages = 0;
|
u64 num_pages = 0;
|
||||||
|
@ -3483,7 +3522,7 @@ again:
|
||||||
num_pages *= 16;
|
num_pages *= 16;
|
||||||
num_pages *= PAGE_SIZE;
|
num_pages *= PAGE_SIZE;
|
||||||
|
|
||||||
ret = btrfs_check_data_free_space(inode, 0, num_pages);
|
ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out_put;
|
goto out_put;
|
||||||
|
|
||||||
|
@ -3514,6 +3553,7 @@ out:
|
||||||
block_group->disk_cache_state = dcs;
|
block_group->disk_cache_state = dcs;
|
||||||
spin_unlock(&block_group->lock);
|
spin_unlock(&block_group->lock);
|
||||||
|
|
||||||
|
extent_changeset_free(data_reserved);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3924,15 +3964,60 @@ static const char *alloc_name(u64 flags)
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
|
static int create_space_info(struct btrfs_fs_info *info, u64 flags,
|
||||||
|
struct btrfs_space_info **new)
|
||||||
|
{
|
||||||
|
|
||||||
|
struct btrfs_space_info *space_info;
|
||||||
|
int i;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
|
||||||
|
if (!space_info)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
|
||||||
|
GFP_KERNEL);
|
||||||
|
if (ret) {
|
||||||
|
kfree(space_info);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
|
||||||
|
INIT_LIST_HEAD(&space_info->block_groups[i]);
|
||||||
|
init_rwsem(&space_info->groups_sem);
|
||||||
|
spin_lock_init(&space_info->lock);
|
||||||
|
space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
|
||||||
|
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
|
||||||
|
init_waitqueue_head(&space_info->wait);
|
||||||
|
INIT_LIST_HEAD(&space_info->ro_bgs);
|
||||||
|
INIT_LIST_HEAD(&space_info->tickets);
|
||||||
|
INIT_LIST_HEAD(&space_info->priority_tickets);
|
||||||
|
|
||||||
|
ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
|
||||||
|
info->space_info_kobj, "%s",
|
||||||
|
alloc_name(space_info->flags));
|
||||||
|
if (ret) {
|
||||||
|
percpu_counter_destroy(&space_info->total_bytes_pinned);
|
||||||
|
kfree(space_info);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
*new = space_info;
|
||||||
|
list_add_rcu(&space_info->list, &info->space_info);
|
||||||
|
if (flags & BTRFS_BLOCK_GROUP_DATA)
|
||||||
|
info->data_sinfo = space_info;
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void update_space_info(struct btrfs_fs_info *info, u64 flags,
|
||||||
u64 total_bytes, u64 bytes_used,
|
u64 total_bytes, u64 bytes_used,
|
||||||
u64 bytes_readonly,
|
u64 bytes_readonly,
|
||||||
struct btrfs_space_info **space_info)
|
struct btrfs_space_info **space_info)
|
||||||
{
|
{
|
||||||
struct btrfs_space_info *found;
|
struct btrfs_space_info *found;
|
||||||
int i;
|
|
||||||
int factor;
|
int factor;
|
||||||
int ret;
|
|
||||||
|
|
||||||
if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
|
if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
|
||||||
BTRFS_BLOCK_GROUP_RAID10))
|
BTRFS_BLOCK_GROUP_RAID10))
|
||||||
|
@ -3941,69 +4026,19 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
|
||||||
factor = 1;
|
factor = 1;
|
||||||
|
|
||||||
found = __find_space_info(info, flags);
|
found = __find_space_info(info, flags);
|
||||||
if (found) {
|
ASSERT(found);
|
||||||
spin_lock(&found->lock);
|
spin_lock(&found->lock);
|
||||||
found->total_bytes += total_bytes;
|
found->total_bytes += total_bytes;
|
||||||
found->disk_total += total_bytes * factor;
|
found->disk_total += total_bytes * factor;
|
||||||
found->bytes_used += bytes_used;
|
found->bytes_used += bytes_used;
|
||||||
found->disk_used += bytes_used * factor;
|
found->disk_used += bytes_used * factor;
|
||||||
found->bytes_readonly += bytes_readonly;
|
found->bytes_readonly += bytes_readonly;
|
||||||
if (total_bytes > 0)
|
if (total_bytes > 0)
|
||||||
found->full = 0;
|
found->full = 0;
|
||||||
space_info_add_new_bytes(info, found, total_bytes -
|
space_info_add_new_bytes(info, found, total_bytes -
|
||||||
bytes_used - bytes_readonly);
|
bytes_used - bytes_readonly);
|
||||||
spin_unlock(&found->lock);
|
spin_unlock(&found->lock);
|
||||||
*space_info = found;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
found = kzalloc(sizeof(*found), GFP_NOFS);
|
|
||||||
if (!found)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
|
|
||||||
if (ret) {
|
|
||||||
kfree(found);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
|
|
||||||
INIT_LIST_HEAD(&found->block_groups[i]);
|
|
||||||
init_rwsem(&found->groups_sem);
|
|
||||||
spin_lock_init(&found->lock);
|
|
||||||
found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
|
|
||||||
found->total_bytes = total_bytes;
|
|
||||||
found->disk_total = total_bytes * factor;
|
|
||||||
found->bytes_used = bytes_used;
|
|
||||||
found->disk_used = bytes_used * factor;
|
|
||||||
found->bytes_pinned = 0;
|
|
||||||
found->bytes_reserved = 0;
|
|
||||||
found->bytes_readonly = bytes_readonly;
|
|
||||||
found->bytes_may_use = 0;
|
|
||||||
found->full = 0;
|
|
||||||
found->max_extent_size = 0;
|
|
||||||
found->force_alloc = CHUNK_ALLOC_NO_FORCE;
|
|
||||||
found->chunk_alloc = 0;
|
|
||||||
found->flush = 0;
|
|
||||||
init_waitqueue_head(&found->wait);
|
|
||||||
INIT_LIST_HEAD(&found->ro_bgs);
|
|
||||||
INIT_LIST_HEAD(&found->tickets);
|
|
||||||
INIT_LIST_HEAD(&found->priority_tickets);
|
|
||||||
|
|
||||||
ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
|
|
||||||
info->space_info_kobj, "%s",
|
|
||||||
alloc_name(found->flags));
|
|
||||||
if (ret) {
|
|
||||||
percpu_counter_destroy(&found->total_bytes_pinned);
|
|
||||||
kfree(found);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
*space_info = found;
|
*space_info = found;
|
||||||
list_add_rcu(&found->list, &info->space_info);
|
|
||||||
if (flags & BTRFS_BLOCK_GROUP_DATA)
|
|
||||||
info->data_sinfo = found;
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
|
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
|
||||||
|
@ -4121,7 +4156,7 @@ static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
|
||||||
return btrfs_reduce_alloc_profile(fs_info, flags);
|
return btrfs_reduce_alloc_profile(fs_info, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
|
static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
struct btrfs_fs_info *fs_info = root->fs_info;
|
||||||
u64 flags;
|
u64 flags;
|
||||||
|
@ -4138,6 +4173,21 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
|
||||||
|
{
|
||||||
|
return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
|
||||||
|
{
|
||||||
|
return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
|
||||||
|
{
|
||||||
|
return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
|
||||||
|
}
|
||||||
|
|
||||||
static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
|
static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
|
||||||
bool may_use_included)
|
bool may_use_included)
|
||||||
{
|
{
|
||||||
|
@ -4187,7 +4237,7 @@ again:
|
||||||
data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
|
data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
|
||||||
spin_unlock(&data_sinfo->lock);
|
spin_unlock(&data_sinfo->lock);
|
||||||
alloc:
|
alloc:
|
||||||
alloc_target = btrfs_get_alloc_profile(root, 1);
|
alloc_target = btrfs_data_alloc_profile(fs_info);
|
||||||
/*
|
/*
|
||||||
* It is ugly that we don't call nolock join
|
* It is ugly that we don't call nolock join
|
||||||
* transaction for the free space inode case here.
|
* transaction for the free space inode case here.
|
||||||
|
@ -4238,7 +4288,7 @@ commit_trans:
|
||||||
|
|
||||||
if (need_commit > 0) {
|
if (need_commit > 0) {
|
||||||
btrfs_start_delalloc_roots(fs_info, 0, -1);
|
btrfs_start_delalloc_roots(fs_info, 0, -1);
|
||||||
btrfs_wait_ordered_roots(fs_info, -1, 0,
|
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
|
||||||
(u64)-1);
|
(u64)-1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4278,12 +4328,8 @@ commit_trans:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
int btrfs_check_data_free_space(struct inode *inode,
|
||||||
* New check_data_free_space() with ability for precious data reservation
|
struct extent_changeset **reserved, u64 start, u64 len)
|
||||||
* Will replace old btrfs_check_data_free_space(), but for patch split,
|
|
||||||
* add a new function first and then replace it.
|
|
||||||
*/
|
|
||||||
int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
|
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -4298,9 +4344,11 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
|
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
|
||||||
ret = btrfs_qgroup_reserve_data(inode, start, len);
|
ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
|
||||||
if (ret)
|
if (ret < 0)
|
||||||
btrfs_free_reserved_data_space_noquota(inode, start, len);
|
btrfs_free_reserved_data_space_noquota(inode, start, len);
|
||||||
|
else
|
||||||
|
ret = 0;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4341,7 +4389,8 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
|
||||||
* This one will handle the per-inode data rsv map for accurate reserved
|
* This one will handle the per-inode data rsv map for accurate reserved
|
||||||
* space framework.
|
* space framework.
|
||||||
*/
|
*/
|
||||||
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
|
void btrfs_free_reserved_data_space(struct inode *inode,
|
||||||
|
struct extent_changeset *reserved, u64 start, u64 len)
|
||||||
{
|
{
|
||||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||||
|
|
||||||
|
@ -4351,7 +4400,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
|
||||||
start = round_down(start, root->fs_info->sectorsize);
|
start = round_down(start, root->fs_info->sectorsize);
|
||||||
|
|
||||||
btrfs_free_reserved_data_space_noquota(inode, start, len);
|
btrfs_free_reserved_data_space_noquota(inode, start, len);
|
||||||
btrfs_qgroup_free_data(inode, start, len);
|
btrfs_qgroup_free_data(inode, reserved, start, len);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void force_metadata_allocation(struct btrfs_fs_info *info)
|
static void force_metadata_allocation(struct btrfs_fs_info *info)
|
||||||
|
@ -4463,9 +4512,8 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (left < thresh) {
|
if (left < thresh) {
|
||||||
u64 flags;
|
u64 flags = btrfs_system_alloc_profile(fs_info);
|
||||||
|
|
||||||
flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
|
|
||||||
/*
|
/*
|
||||||
* Ignore failure to create system chunk. We might end up not
|
* Ignore failure to create system chunk. We might end up not
|
||||||
* needing it, as we might not need to COW all nodes/leafs from
|
* needing it, as we might not need to COW all nodes/leafs from
|
||||||
|
@ -4506,10 +4554,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
space_info = __find_space_info(fs_info, flags);
|
space_info = __find_space_info(fs_info, flags);
|
||||||
if (!space_info) {
|
if (!space_info) {
|
||||||
ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
|
ret = create_space_info(fs_info, flags, &space_info);
|
||||||
BUG_ON(ret); /* -ENOMEM */
|
if (ret)
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
BUG_ON(!space_info); /* Logic error */
|
|
||||||
|
|
||||||
again:
|
again:
|
||||||
spin_lock(&space_info->lock);
|
spin_lock(&space_info->lock);
|
||||||
|
@ -4614,11 +4662,11 @@ out:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int can_overcommit(struct btrfs_root *root,
|
static int can_overcommit(struct btrfs_fs_info *fs_info,
|
||||||
struct btrfs_space_info *space_info, u64 bytes,
|
struct btrfs_space_info *space_info, u64 bytes,
|
||||||
enum btrfs_reserve_flush_enum flush)
|
enum btrfs_reserve_flush_enum flush,
|
||||||
|
bool system_chunk)
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
||||||
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
|
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
|
||||||
u64 profile;
|
u64 profile;
|
||||||
u64 space_size;
|
u64 space_size;
|
||||||
|
@ -4629,7 +4677,11 @@ static int can_overcommit(struct btrfs_root *root,
|
||||||
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
|
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
profile = btrfs_get_alloc_profile(root, 0);
|
if (system_chunk)
|
||||||
|
profile = btrfs_system_alloc_profile(fs_info);
|
||||||
|
else
|
||||||
|
profile = btrfs_metadata_alloc_profile(fs_info);
|
||||||
|
|
||||||
used = btrfs_space_info_used(space_info, false);
|
used = btrfs_space_info_used(space_info, false);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -4646,9 +4698,7 @@ static int can_overcommit(struct btrfs_root *root,
|
||||||
|
|
||||||
used += space_info->bytes_may_use;
|
used += space_info->bytes_may_use;
|
||||||
|
|
||||||
spin_lock(&fs_info->free_chunk_lock);
|
avail = atomic64_read(&fs_info->free_chunk_space);
|
||||||
avail = fs_info->free_chunk_space;
|
|
||||||
spin_unlock(&fs_info->free_chunk_lock);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we have dup, raid1 or raid10 then only half of the free
|
* If we have dup, raid1 or raid10 then only half of the free
|
||||||
|
@ -4698,14 +4748,14 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
|
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
|
||||||
u64 to_reclaim)
|
u64 to_reclaim)
|
||||||
{
|
{
|
||||||
u64 bytes;
|
u64 bytes;
|
||||||
int nr;
|
u64 nr;
|
||||||
|
|
||||||
bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
|
bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
|
||||||
nr = (int)div64_u64(to_reclaim, bytes);
|
nr = div64_u64(to_reclaim, bytes);
|
||||||
if (!nr)
|
if (!nr)
|
||||||
nr = 1;
|
nr = 1;
|
||||||
return nr;
|
return nr;
|
||||||
|
@ -4716,24 +4766,23 @@ static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
|
||||||
/*
|
/*
|
||||||
* shrink metadata reservation for delalloc
|
* shrink metadata reservation for delalloc
|
||||||
*/
|
*/
|
||||||
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
|
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
|
||||||
bool wait_ordered)
|
u64 orig, bool wait_ordered)
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
||||||
struct btrfs_block_rsv *block_rsv;
|
struct btrfs_block_rsv *block_rsv;
|
||||||
struct btrfs_space_info *space_info;
|
struct btrfs_space_info *space_info;
|
||||||
struct btrfs_trans_handle *trans;
|
struct btrfs_trans_handle *trans;
|
||||||
u64 delalloc_bytes;
|
u64 delalloc_bytes;
|
||||||
u64 max_reclaim;
|
u64 max_reclaim;
|
||||||
|
u64 items;
|
||||||
long time_left;
|
long time_left;
|
||||||
unsigned long nr_pages;
|
unsigned long nr_pages;
|
||||||
int loops;
|
int loops;
|
||||||
int items;
|
|
||||||
enum btrfs_reserve_flush_enum flush;
|
enum btrfs_reserve_flush_enum flush;
|
||||||
|
|
||||||
/* Calc the number of the pages we need flush for space reservation */
|
/* Calc the number of the pages we need flush for space reservation */
|
||||||
items = calc_reclaim_items_nr(fs_info, to_reclaim);
|
items = calc_reclaim_items_nr(fs_info, to_reclaim);
|
||||||
to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
|
to_reclaim = items * EXTENT_SIZE_PER_ITEM;
|
||||||
|
|
||||||
trans = (struct btrfs_trans_handle *)current->journal_info;
|
trans = (struct btrfs_trans_handle *)current->journal_info;
|
||||||
block_rsv = &fs_info->delalloc_block_rsv;
|
block_rsv = &fs_info->delalloc_block_rsv;
|
||||||
|
@ -4776,7 +4825,7 @@ skip_async:
|
||||||
else
|
else
|
||||||
flush = BTRFS_RESERVE_NO_FLUSH;
|
flush = BTRFS_RESERVE_NO_FLUSH;
|
||||||
spin_lock(&space_info->lock);
|
spin_lock(&space_info->lock);
|
||||||
if (can_overcommit(root, space_info, orig, flush)) {
|
if (can_overcommit(fs_info, space_info, orig, flush, false)) {
|
||||||
spin_unlock(&space_info->lock);
|
spin_unlock(&space_info->lock);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -4838,7 +4887,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
|
||||||
|
|
||||||
spin_lock(&delayed_rsv->lock);
|
spin_lock(&delayed_rsv->lock);
|
||||||
if (percpu_counter_compare(&space_info->total_bytes_pinned,
|
if (percpu_counter_compare(&space_info->total_bytes_pinned,
|
||||||
bytes - delayed_rsv->size) >= 0) {
|
bytes - delayed_rsv->size) < 0) {
|
||||||
spin_unlock(&delayed_rsv->lock);
|
spin_unlock(&delayed_rsv->lock);
|
||||||
return -ENOSPC;
|
return -ENOSPC;
|
||||||
}
|
}
|
||||||
|
@ -4886,7 +4935,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
|
||||||
break;
|
break;
|
||||||
case FLUSH_DELALLOC:
|
case FLUSH_DELALLOC:
|
||||||
case FLUSH_DELALLOC_WAIT:
|
case FLUSH_DELALLOC_WAIT:
|
||||||
shrink_delalloc(root, num_bytes * 2, orig_bytes,
|
shrink_delalloc(fs_info, num_bytes * 2, orig_bytes,
|
||||||
state == FLUSH_DELALLOC_WAIT);
|
state == FLUSH_DELALLOC_WAIT);
|
||||||
break;
|
break;
|
||||||
case ALLOC_CHUNK:
|
case ALLOC_CHUNK:
|
||||||
|
@ -4896,7 +4945,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
ret = do_chunk_alloc(trans, fs_info,
|
ret = do_chunk_alloc(trans, fs_info,
|
||||||
btrfs_get_alloc_profile(root, 0),
|
btrfs_metadata_alloc_profile(fs_info),
|
||||||
CHUNK_ALLOC_NO_FORCE);
|
CHUNK_ALLOC_NO_FORCE);
|
||||||
btrfs_end_transaction(trans);
|
btrfs_end_transaction(trans);
|
||||||
if (ret > 0 || ret == -ENOSPC)
|
if (ret > 0 || ret == -ENOSPC)
|
||||||
|
@ -4917,8 +4966,9 @@ static int flush_space(struct btrfs_fs_info *fs_info,
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u64
|
static inline u64
|
||||||
btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
|
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
|
||||||
struct btrfs_space_info *space_info)
|
struct btrfs_space_info *space_info,
|
||||||
|
bool system_chunk)
|
||||||
{
|
{
|
||||||
struct reserve_ticket *ticket;
|
struct reserve_ticket *ticket;
|
||||||
u64 used;
|
u64 used;
|
||||||
|
@ -4933,14 +4983,14 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
|
||||||
return to_reclaim;
|
return to_reclaim;
|
||||||
|
|
||||||
to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
|
to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
|
||||||
if (can_overcommit(root, space_info, to_reclaim,
|
if (can_overcommit(fs_info, space_info, to_reclaim,
|
||||||
BTRFS_RESERVE_FLUSH_ALL))
|
BTRFS_RESERVE_FLUSH_ALL, system_chunk))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
used = space_info->bytes_used + space_info->bytes_reserved +
|
used = btrfs_space_info_used(space_info, true);
|
||||||
space_info->bytes_pinned + space_info->bytes_readonly +
|
|
||||||
space_info->bytes_may_use;
|
if (can_overcommit(fs_info, space_info, SZ_1M,
|
||||||
if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
|
BTRFS_RESERVE_FLUSH_ALL, system_chunk))
|
||||||
expected = div_factor_fine(space_info->total_bytes, 95);
|
expected = div_factor_fine(space_info->total_bytes, 95);
|
||||||
else
|
else
|
||||||
expected = div_factor_fine(space_info->total_bytes, 90);
|
expected = div_factor_fine(space_info->total_bytes, 90);
|
||||||
|
@ -4954,17 +5004,18 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
|
||||||
return to_reclaim;
|
return to_reclaim;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
|
static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
|
||||||
struct btrfs_root *root, u64 used)
|
struct btrfs_space_info *space_info,
|
||||||
|
u64 used, bool system_chunk)
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
||||||
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
|
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
|
||||||
|
|
||||||
/* If we're just plain full then async reclaim just slows us down. */
|
/* If we're just plain full then async reclaim just slows us down. */
|
||||||
if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
|
if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (!btrfs_calc_reclaim_metadata_size(root, space_info))
|
if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
|
||||||
|
system_chunk))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
return (used >= thresh && !btrfs_fs_closing(fs_info) &&
|
return (used >= thresh && !btrfs_fs_closing(fs_info) &&
|
||||||
|
@ -5001,8 +5052,8 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
|
||||||
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
|
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
|
||||||
|
|
||||||
spin_lock(&space_info->lock);
|
spin_lock(&space_info->lock);
|
||||||
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
|
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
|
||||||
space_info);
|
false);
|
||||||
if (!to_reclaim) {
|
if (!to_reclaim) {
|
||||||
space_info->flush = 0;
|
space_info->flush = 0;
|
||||||
spin_unlock(&space_info->lock);
|
spin_unlock(&space_info->lock);
|
||||||
|
@ -5024,8 +5075,9 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
|
||||||
spin_unlock(&space_info->lock);
|
spin_unlock(&space_info->lock);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
|
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
|
||||||
space_info);
|
space_info,
|
||||||
|
false);
|
||||||
ticket = list_first_entry(&space_info->tickets,
|
ticket = list_first_entry(&space_info->tickets,
|
||||||
struct reserve_ticket, list);
|
struct reserve_ticket, list);
|
||||||
if (last_tickets_id == space_info->tickets_id) {
|
if (last_tickets_id == space_info->tickets_id) {
|
||||||
|
@ -5063,8 +5115,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
|
||||||
int flush_state = FLUSH_DELAYED_ITEMS_NR;
|
int flush_state = FLUSH_DELAYED_ITEMS_NR;
|
||||||
|
|
||||||
spin_lock(&space_info->lock);
|
spin_lock(&space_info->lock);
|
||||||
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
|
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
|
||||||
space_info);
|
false);
|
||||||
if (!to_reclaim) {
|
if (!to_reclaim) {
|
||||||
spin_unlock(&space_info->lock);
|
spin_unlock(&space_info->lock);
|
||||||
return;
|
return;
|
||||||
|
@ -5143,12 +5195,12 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
|
||||||
* regain reservations will be made and this will fail if there is not enough
|
* regain reservations will be made and this will fail if there is not enough
|
||||||
* space already.
|
* space already.
|
||||||
*/
|
*/
|
||||||
static int __reserve_metadata_bytes(struct btrfs_root *root,
|
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
|
||||||
struct btrfs_space_info *space_info,
|
struct btrfs_space_info *space_info,
|
||||||
u64 orig_bytes,
|
u64 orig_bytes,
|
||||||
enum btrfs_reserve_flush_enum flush)
|
enum btrfs_reserve_flush_enum flush,
|
||||||
|
bool system_chunk)
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
||||||
struct reserve_ticket ticket;
|
struct reserve_ticket ticket;
|
||||||
u64 used;
|
u64 used;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
@ -5170,7 +5222,8 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
|
||||||
trace_btrfs_space_reservation(fs_info, "space_info",
|
trace_btrfs_space_reservation(fs_info, "space_info",
|
||||||
space_info->flags, orig_bytes, 1);
|
space_info->flags, orig_bytes, 1);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
} else if (can_overcommit(root, space_info, orig_bytes, flush)) {
|
} else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
|
||||||
|
system_chunk)) {
|
||||||
space_info->bytes_may_use += orig_bytes;
|
space_info->bytes_may_use += orig_bytes;
|
||||||
trace_btrfs_space_reservation(fs_info, "space_info",
|
trace_btrfs_space_reservation(fs_info, "space_info",
|
||||||
space_info->flags, orig_bytes, 1);
|
space_info->flags, orig_bytes, 1);
|
||||||
|
@ -5197,7 +5250,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
|
||||||
orig_bytes, flush,
|
orig_bytes, flush,
|
||||||
"enospc");
|
"enospc");
|
||||||
queue_work(system_unbound_wq,
|
queue_work(system_unbound_wq,
|
||||||
&root->fs_info->async_reclaim_work);
|
&fs_info->async_reclaim_work);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
list_add_tail(&ticket.list,
|
list_add_tail(&ticket.list,
|
||||||
|
@ -5211,7 +5264,8 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
|
||||||
* the async reclaim as we will panic.
|
* the async reclaim as we will panic.
|
||||||
*/
|
*/
|
||||||
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
|
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
|
||||||
need_do_async_reclaim(space_info, root, used) &&
|
need_do_async_reclaim(fs_info, space_info,
|
||||||
|
used, system_chunk) &&
|
||||||
!work_busy(&fs_info->async_reclaim_work)) {
|
!work_busy(&fs_info->async_reclaim_work)) {
|
||||||
trace_btrfs_trigger_flush(fs_info, space_info->flags,
|
trace_btrfs_trigger_flush(fs_info, space_info->flags,
|
||||||
orig_bytes, flush, "preempt");
|
orig_bytes, flush, "preempt");
|
||||||
|
@ -5269,9 +5323,10 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
|
||||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
struct btrfs_fs_info *fs_info = root->fs_info;
|
||||||
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
|
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
|
||||||
int ret;
|
int ret;
|
||||||
|
bool system_chunk = (root == fs_info->chunk_root);
|
||||||
|
|
||||||
ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
|
ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
|
||||||
flush);
|
orig_bytes, flush, system_chunk);
|
||||||
if (ret == -ENOSPC &&
|
if (ret == -ENOSPC &&
|
||||||
unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
|
unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
|
||||||
if (block_rsv != global_rsv &&
|
if (block_rsv != global_rsv &&
|
||||||
|
@ -5380,9 +5435,7 @@ static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
|
||||||
* overcommit, and if we can't then we just need to free up our space
|
* overcommit, and if we can't then we just need to free up our space
|
||||||
* and not satisfy any requests.
|
* and not satisfy any requests.
|
||||||
*/
|
*/
|
||||||
used = space_info->bytes_used + space_info->bytes_reserved +
|
used = btrfs_space_info_used(space_info, true);
|
||||||
space_info->bytes_pinned + space_info->bytes_readonly +
|
|
||||||
space_info->bytes_may_use;
|
|
||||||
if (used - num_bytes >= space_info->total_bytes)
|
if (used - num_bytes >= space_info->total_bytes)
|
||||||
check_overcommit = true;
|
check_overcommit = true;
|
||||||
again:
|
again:
|
||||||
|
@ -5394,8 +5447,7 @@ again:
|
||||||
* adding the ticket space would be a double count.
|
* adding the ticket space would be a double count.
|
||||||
*/
|
*/
|
||||||
if (check_overcommit &&
|
if (check_overcommit &&
|
||||||
!can_overcommit(fs_info->extent_root, space_info, 0,
|
!can_overcommit(fs_info, space_info, 0, flush, false))
|
||||||
flush))
|
|
||||||
break;
|
break;
|
||||||
if (num_bytes >= ticket->bytes) {
|
if (num_bytes >= ticket->bytes) {
|
||||||
list_del_init(&ticket->list);
|
list_del_init(&ticket->list);
|
||||||
|
@ -6124,6 +6176,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
|
||||||
* @inode: inode we're writing to
|
* @inode: inode we're writing to
|
||||||
* @start: start range we are writing to
|
* @start: start range we are writing to
|
||||||
* @len: how long the range we are writing to
|
* @len: how long the range we are writing to
|
||||||
|
* @reserved: mandatory parameter, record actually reserved qgroup ranges of
|
||||||
|
* current reservation.
|
||||||
*
|
*
|
||||||
* This will do the following things
|
* This will do the following things
|
||||||
*
|
*
|
||||||
@@ -6141,16 +6195,17 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
  * Return 0 for success
  * Return <0 for error(-ENOSPC or -EQUOT)
  */
-int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
+int btrfs_delalloc_reserve_space(struct inode *inode,
+			struct extent_changeset **reserved, u64 start, u64 len)
 {
 	int ret;
 
-	ret = btrfs_check_data_free_space(inode, start, len);
+	ret = btrfs_check_data_free_space(inode, reserved, start, len);
 	if (ret < 0)
 		return ret;
 	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
 	if (ret < 0)
-		btrfs_free_reserved_data_space(inode, start, len);
+		btrfs_free_reserved_data_space(inode, *reserved, start, len);
 	return ret;
 }
 
@@ -6169,10 +6224,11 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
  * list if there are no delalloc bytes left.
  * Also it will handle the qgroup reserved space.
  */
-void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
+void btrfs_delalloc_release_space(struct inode *inode,
+			struct extent_changeset *reserved, u64 start, u64 len)
 {
 	btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
-	btrfs_free_reserved_data_space(inode, start, len);
+	btrfs_free_reserved_data_space(inode, reserved, start, len);
 }
 
 static int update_block_group(struct btrfs_trans_handle *trans,
@@ -6248,6 +6304,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 			trace_btrfs_space_reservation(info, "pinned",
 						      cache->space_info->flags,
 						      num_bytes, 1);
+			percpu_counter_add(&cache->space_info->total_bytes_pinned,
+					   num_bytes);
 			set_extent_dirty(info->pinned_extents,
 					 bytenr, bytenr + num_bytes - 1,
 					 GFP_NOFS | __GFP_NOFAIL);
@@ -6324,6 +6382,7 @@ static int pin_down_extent(struct btrfs_fs_info *fs_info,
 
 	trace_btrfs_space_reservation(fs_info, "pinned",
 				      cache->space_info->flags, num_bytes, 1);
+	percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
 	set_extent_dirty(fs_info->pinned_extents, bytenr,
 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
 	return 0;
@@ -6794,27 +6853,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
-			     u64 owner, u64 root_objectid)
-{
-	struct btrfs_space_info *space_info;
-	u64 flags;
-
-	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
-			flags = BTRFS_BLOCK_GROUP_SYSTEM;
-		else
-			flags = BTRFS_BLOCK_GROUP_METADATA;
-	} else {
-		flags = BTRFS_BLOCK_GROUP_DATA;
-	}
-
-	space_info = __find_space_info(fs_info, flags);
-	BUG_ON(!space_info); /* Logic bug */
-	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
-}
-
-
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			       struct btrfs_fs_info *info,
 			       struct btrfs_delayed_ref_node *node, u64 parent,
@@ -7037,8 +7075,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				goto out;
 			}
 		}
-		add_pinned_bytes(info, -num_bytes, owner_objectid,
-				 root_objectid);
 	} else {
 		if (found_extent) {
 			BUG_ON(is_data && refs_to_drop !=
@@ -7170,19 +7206,19 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 	int ret;
 
 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
-						 buf->start, buf->len,
-						 parent,
+		int old_ref_mod, new_ref_mod;
+
+		ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
+						 buf->len, parent,
 						 root->root_key.objectid,
 						 btrfs_header_level(buf),
-						 BTRFS_DROP_DELAYED_REF, NULL);
+						 BTRFS_DROP_DELAYED_REF, NULL,
+						 &old_ref_mod, &new_ref_mod);
 		BUG_ON(ret); /* -ENOMEM */
+		pin = old_ref_mod >= 0 && new_ref_mod < 0;
 	}
 
-	if (!last_ref)
-		return;
-
-	if (btrfs_header_generation(buf) == trans->transid) {
+	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
 		struct btrfs_block_group_cache *cache;
 
 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
@@ -7191,6 +7227,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 				goto out;
 			}
 
+		pin = 0;
 		cache = btrfs_lookup_block_group(fs_info, buf->start);
 
 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
@@ -7206,18 +7243,19 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 		btrfs_free_reserved_bytes(cache, buf->len, 0);
 		btrfs_put_block_group(cache);
 		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
-		pin = 0;
 	}
 out:
 	if (pin)
 		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
 				 root->root_key.objectid);
 
-	/*
-	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
-	 * anymore.
-	 */
-	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
+	if (last_ref) {
+		/*
+		 * Deleting the buffer, clear the corrupt flag since it doesn't
+		 * matter anymore.
+		 */
+		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
+	}
 }
 
 /* Can return -ENOMEM */
@@ -7226,12 +7264,12 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
 		      u64 owner, u64 offset)
 {
+	int old_ref_mod, new_ref_mod;
 	int ret;
 
 	if (btrfs_is_testing(fs_info))
 		return 0;
 
-	add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
-
 	/*
 	 * tree log blocks never actually go into the extent allocation
@@ -7241,19 +7279,25 @@
 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
 		/* unlocks the pinned mutex */
 		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
+		old_ref_mod = new_ref_mod = 0;
 		ret = 0;
 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
-						 num_bytes,
-						 parent, root_objectid, (int)owner,
-						 BTRFS_DROP_DELAYED_REF, NULL);
+						 num_bytes, parent,
+						 root_objectid, (int)owner,
+						 BTRFS_DROP_DELAYED_REF, NULL,
+						 &old_ref_mod, &new_ref_mod);
 	} else {
 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
-						 num_bytes,
-						 parent, root_objectid, owner,
-						 offset, 0,
-						 BTRFS_DROP_DELAYED_REF);
+						 num_bytes, parent,
+						 root_objectid, owner, offset,
+						 0, BTRFS_DROP_DELAYED_REF,
+						 &old_ref_mod, &new_ref_mod);
 	}
+
+	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
+		add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
+
 	return ret;
 }
 
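The hunk above is the heart of the total_bytes_pinned rework in this pull: rather than counting bytes as pinned on every drop, btrfs_free_extent() asks the delayed-ref code for the extent's net ref modifier before and after the drop, and accounts pinned bytes only when that value crosses from non-negative to negative. A minimal standalone sketch of just that predicate (plain userspace C, not kernel code; the function names are made up for illustration):

#include <assert.h>

/* pin only when the net ref modifier first goes negative */
static int crosses_zero(int old_ref_mod, int new_ref_mod)
{
	return old_ref_mod >= 0 && new_ref_mod < 0;
}

int main(void)
{
	assert(crosses_zero(0, -1));	/* first drop: account pinned bytes */
	assert(!crosses_zero(-1, -2));	/* already pinned: no double count */
	assert(!crosses_zero(2, 1));	/* still referenced: nothing pinned */
	return 0;
}

Counting only on the sign change keeps total_bytes_pinned from being bumped repeatedly for an extent that is already destined to be freed at commit.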
@@ -7956,7 +8000,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 	u64 flags;
 	int ret;
 
-	flags = btrfs_get_alloc_profile(root, is_data);
+	flags = get_alloc_profile_by_root(root, is_data);
 again:
 	WARN_ON(num_bytes < fs_info->sectorsize);
 	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
@@ -8200,9 +8244,9 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
 	ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
-					 ins->offset, 0,
-					 root_objectid, owner, offset,
-					 ram_bytes, BTRFS_ADD_DELAYED_EXTENT);
+					 ins->offset, 0, root_objectid, owner,
+					 offset, ram_bytes,
+					 BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
 	return ret;
 }
 
@@ -8422,11 +8466,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		extent_op->is_data = false;
 		extent_op->level = level;
 
-		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
-						 ins.objectid, ins.offset,
-						 parent, root_objectid, level,
+		ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
+						 ins.offset, parent,
+						 root_objectid, level,
 						 BTRFS_ADD_DELAYED_EXTENT,
-						 extent_op);
+						 extent_op, NULL, NULL);
 		if (ret)
 			goto out_free_delayed;
 	}
@@ -10059,19 +10103,9 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		}
 
 		trace_btrfs_add_block_group(info, cache, 0);
-		ret = update_space_info(info, cache->flags, found_key.offset,
+		update_space_info(info, cache->flags, found_key.offset,
 				  btrfs_block_group_used(&cache->item),
 				  cache->bytes_super, &space_info);
-		if (ret) {
-			btrfs_remove_free_space_cache(cache);
-			spin_lock(&info->block_group_cache_lock);
-			rb_erase(&cache->cache_node,
-				 &info->block_group_cache_tree);
-			RB_CLEAR_NODE(&cache->cache_node);
-			spin_unlock(&info->block_group_cache_lock);
-			btrfs_put_block_group(cache);
-			goto error;
-		}
 
 		cache->space_info = space_info;
 
@@ -10203,16 +10237,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	}
 #endif
 	/*
-	 * Call to ensure the corresponding space_info object is created and
-	 * assigned to our block group, but don't update its counters just yet.
-	 * We want our bg to be added to the rbtree with its ->space_info set.
+	 * Ensure the corresponding space_info object is created and
+	 * assigned to our block group. We want our bg to be added to the rbtree
+	 * with its ->space_info set.
 	 */
-	ret = update_space_info(fs_info, cache->flags, 0, 0, 0,
-				&cache->space_info);
-	if (ret) {
-		btrfs_remove_free_space_cache(cache);
-		btrfs_put_block_group(cache);
-		return ret;
+	cache->space_info = __find_space_info(fs_info, cache->flags);
+	if (!cache->space_info) {
+		ret = create_space_info(fs_info, cache->flags,
+					&cache->space_info);
+		if (ret) {
+			btrfs_remove_free_space_cache(cache);
+			btrfs_put_block_group(cache);
+			return ret;
+		}
 	}
 
 	ret = btrfs_add_block_group_cache(fs_info, cache);
@@ -10227,18 +10264,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	 * the rbtree, update the space info's counters.
 	 */
 	trace_btrfs_add_block_group(fs_info, cache, 1);
-	ret = update_space_info(fs_info, cache->flags, size, bytes_used,
+	update_space_info(fs_info, cache->flags, size, bytes_used,
 				cache->bytes_super, &cache->space_info);
-	if (ret) {
-		btrfs_remove_free_space_cache(cache);
-		spin_lock(&fs_info->block_group_cache_lock);
-		rb_erase(&cache->cache_node,
-			 &fs_info->block_group_cache_tree);
-		RB_CLEAR_NODE(&cache->cache_node);
-		spin_unlock(&fs_info->block_group_cache_lock);
-		btrfs_put_block_group(cache);
-		return ret;
-	}
 	update_global_block_rsv(fs_info);
 
 	__link_block_group(cache->space_info, cache);
@@ -10786,21 +10813,21 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
 		mixed = 1;
 
 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
-	ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+	ret = create_space_info(fs_info, flags, &space_info);
 	if (ret)
 		goto out;
 
 	if (mixed) {
 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
-		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+		ret = create_space_info(fs_info, flags, &space_info);
 	} else {
 		flags = BTRFS_BLOCK_GROUP_METADATA;
-		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+		ret = create_space_info(fs_info, flags, &space_info);
 		if (ret)
 			goto out;
 
 		flags = BTRFS_BLOCK_GROUP_DATA;
-		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
+		ret = create_space_info(fs_info, flags, &space_info);
 	}
 out:
 	return ret;
@@ -87,19 +87,9 @@ void btrfs_leak_debug_check(void)
 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
 		struct extent_io_tree *tree, u64 start, u64 end)
 {
-	struct inode *inode;
-	u64 isize;
-
-	if (!tree->mapping)
-		return;
-
-	inode = tree->mapping->host;
-	isize = i_size_read(inode);
-	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
-		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
-		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
-			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
-	}
+	if (tree->ops && tree->ops->check_extent_io_range)
+		tree->ops->check_extent_io_range(tree->private_data, caller,
+						 start, end);
 }
 #else
 #define btrfs_leak_debug_add(new, head)	do {} while (0)
@@ -154,9 +144,9 @@ static noinline void flush_write_bio(void *data);
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
 {
-	if (!tree->mapping)
-		return NULL;
-	return btrfs_sb(tree->mapping->host->i_sb);
+	if (tree->ops)
+		return tree->ops->tree_fs_info(tree->private_data);
+	return NULL;
 }
 
 int __init extent_io_init(void)
@@ -214,13 +204,13 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-			 struct address_space *mapping)
+			 void *private_data)
 {
 	tree->state = RB_ROOT;
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	tree->mapping = mapping;
+	tree->private_data = private_data;
 }
 
 static struct extent_state *alloc_extent_state(gfp_t mask)
@@ -370,8 +360,7 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
 	if (tree->ops && tree->ops->merge_extent_hook)
-		tree->ops->merge_extent_hook(tree->mapping->host, new,
-					     other);
+		tree->ops->merge_extent_hook(tree->private_data, new, other);
 }
 
 /*
@@ -422,15 +411,14 @@ static void set_state_cb(struct extent_io_tree *tree,
 			 struct extent_state *state, unsigned *bits)
 {
 	if (tree->ops && tree->ops->set_bit_hook)
-		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
+		tree->ops->set_bit_hook(tree->private_data, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
 			   struct extent_state *state, unsigned *bits)
 {
 	if (tree->ops && tree->ops->clear_bit_hook)
-		tree->ops->clear_bit_hook(BTRFS_I(tree->mapping->host),
-				state, bits);
+		tree->ops->clear_bit_hook(tree->private_data, state, bits);
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
@@ -479,7 +467,7 @@ static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 		     u64 split)
 {
 	if (tree->ops && tree->ops->split_extent_hook)
-		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
+		tree->ops->split_extent_hook(tree->private_data, orig, split);
 }
 
 /*
@@ -1403,17 +1391,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
  */
 static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_SHIFT;
-	unsigned long end_index = end >> PAGE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page); /* Pages should be in the extent_io_tree */
-		set_page_writeback(page);
-		put_page(page);
-		index++;
-	}
+	tree->ops->set_range_writeback(tree->private_data, start, end);
 }
 
 /* find the first state struct with 'bits' set after 'start', and
@@ -1962,11 +1940,12 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 		SetPageUptodate(page);
 }
 
-int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
+int free_io_failure(struct extent_io_tree *failure_tree,
+		    struct extent_io_tree *io_tree,
+		    struct io_failure_record *rec)
 {
 	int ret;
 	int err = 0;
-	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
 
 	set_state_failrec(failure_tree, rec->start, NULL);
 	ret = clear_extent_bits(failure_tree, rec->start,
@@ -1975,7 +1954,7 @@ int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
 	if (ret)
 		err = ret;
 
-	ret = clear_extent_bits(&inode->io_tree, rec->start,
+	ret = clear_extent_bits(io_tree, rec->start,
 				rec->start + rec->len - 1,
 				EXTENT_DAMAGED);
 	if (ret && !err)
@@ -1995,11 +1974,10 @@ int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
 * currently, there can be no more than two copies of every data bit. thus,
 * exactly one rewrite is required.
 */
-int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
-		      u64 logical, struct page *page,
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
+		      u64 length, u64 logical, struct page *page,
 		      unsigned int pg_offset, int mirror_num)
 {
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct bio *bio;
 	struct btrfs_device *dev;
 	u64 map_length = 0;
@@ -2010,9 +1988,7 @@ int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
 	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
 	BUG_ON(!mirror_num);
 
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
-	if (!bio)
-		return -EIO;
+	bio = btrfs_io_bio_alloc(1);
 	bio->bi_iter.bi_size = 0;
 	map_length = length;
 
@@ -2071,7 +2047,7 @@ int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
 
 	btrfs_info_rl_in_rcu(fs_info,
 		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
-				  btrfs_ino(inode), start,
+				  ino, start,
 				  rcu_str_deref(dev->name), sector);
 	btrfs_bio_counter_dec(fs_info);
 	bio_put(bio);
@@ -2091,8 +2067,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];
 
-		ret = repair_io_failure(BTRFS_I(fs_info->btree_inode), start,
-					PAGE_SIZE, start, p,
+		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
 					start - page_offset(p), mirror_num);
 		if (ret)
 			break;
@@ -2106,24 +2081,24 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
  * each time an IO finishes, we do a fast check in the IO failure tree
  * to see if we need to process or clean up an io_failure_record
  */
-int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page,
-		     unsigned int pg_offset)
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+		     struct extent_io_tree *failure_tree,
+		     struct extent_io_tree *io_tree, u64 start,
+		     struct page *page, u64 ino, unsigned int pg_offset)
 {
 	u64 private;
 	struct io_failure_record *failrec;
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct extent_state *state;
 	int num_copies;
 	int ret;
 
 	private = 0;
-	ret = count_range_bits(&inode->io_failure_tree, &private,
-			       (u64)-1, 1, EXTENT_DIRTY, 0);
+	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
+			       EXTENT_DIRTY, 0);
 	if (!ret)
 		return 0;
 
-	ret = get_state_failrec(&inode->io_failure_tree, start,
-				&failrec);
+	ret = get_state_failrec(failure_tree, start, &failrec);
 	if (ret)
 		return 0;
 
@@ -2139,25 +2114,25 @@ int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page,
 	if (fs_info->sb->s_flags & MS_RDONLY)
 		goto out;
 
-	spin_lock(&inode->io_tree.lock);
-	state = find_first_extent_bit_state(&inode->io_tree,
+	spin_lock(&io_tree->lock);
+	state = find_first_extent_bit_state(io_tree,
 					    failrec->start,
 					    EXTENT_LOCKED);
-	spin_unlock(&inode->io_tree.lock);
+	spin_unlock(&io_tree->lock);
 
 	if (state && state->start <= failrec->start &&
 	    state->end >= failrec->start + failrec->len - 1) {
 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
 					      failrec->len);
 		if (num_copies > 1) {
-			repair_io_failure(inode, start, failrec->len,
-					  failrec->logical, page,
-					  pg_offset, failrec->failed_mirror);
+			repair_io_failure(fs_info, ino, start, failrec->len,
+					  failrec->logical, page, pg_offset,
+					  failrec->failed_mirror);
 		}
 	}
 
 out:
-	free_io_failure(inode, failrec);
+	free_io_failure(failure_tree, io_tree, failrec);
 
 	return 0;
 }
@@ -2357,10 +2332,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 	struct btrfs_io_bio *btrfs_failed_bio;
 	struct btrfs_io_bio *btrfs_bio;
 
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
-	if (!bio)
-		return NULL;
+	bio = btrfs_io_bio_alloc(1);
 
 	bio->bi_end_io = endio_func;
 	bio->bi_iter.bi_sector = failrec->logical >> 9;
 	bio->bi_bdev = fs_info->fs_devices->latest_bdev;
@@ -2398,6 +2370,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct io_failure_record *failrec;
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
 	struct bio *bio;
 	int read_mode = 0;
 	blk_status_t status;
@@ -2411,7 +2384,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 
 	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
 	if (!ret) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
 
@@ -2424,7 +2397,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 				      (int)phy_offset, failed_bio->bi_end_io,
 				      NULL);
 	if (!bio) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
 	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
@@ -2433,10 +2406,10 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 		 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
 		 read_mode, failrec->this_mirror, failrec->in_validation);
 
-	status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
+	status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
 					    failrec->bio_flags, 0);
 	if (status) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		bio_put(bio);
 		ret = blk_status_to_errno(status);
 	}
@@ -2542,7 +2515,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 	struct bio_vec *bvec;
 	int uptodate = !bio->bi_status;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	struct extent_io_tree *tree;
+	struct extent_io_tree *tree, *failure_tree;
 	u64 offset = 0;
 	u64 start;
 	u64 end;
@@ -2563,6 +2536,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 			(u64)bio->bi_iter.bi_sector, bio->bi_status,
 			io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
+		failure_tree = &BTRFS_I(inode)->io_failure_tree;
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
@@ -2592,8 +2566,10 @@ static void end_bio_extent_readpage(struct bio *bio)
 			if (ret)
 				uptodate = 0;
 			else
-				clean_io_failure(BTRFS_I(inode), start,
-						 page, 0);
+				clean_io_failure(BTRFS_I(inode)->root->fs_info,
+						 failure_tree, tree, start,
+						 page,
+						 btrfs_ino(BTRFS_I(inode)), 0);
 		}
 
 		if (likely(uptodate))
@@ -2682,67 +2658,70 @@ readpage_ok:
 }
 
 /*
- * this allocates from the btrfs_bioset. We're returning a bio right now
- * but you can call btrfs_io_bio for the appropriate container_of magic
+ * Initialize the members up to but not including 'bio'. Use after allocating a
+ * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
+ * 'bio' because use of __GFP_ZERO is not supported.
  */
-struct bio *
-btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
-		gfp_t gfp_flags)
+static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
+{
+	memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
+}
+
+/*
+ * The following helpers allocate a bio. As it's backed by a bioset, it'll
+ * never fail. We're returning a bio right now but you can call btrfs_io_bio
+ * for the appropriate container_of magic
+ */
+struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
 {
-	struct btrfs_io_bio *btrfs_bio;
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
-
-	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-		while (!bio && (nr_vecs /= 2)) {
-			bio = bio_alloc_bioset(gfp_flags,
-					       nr_vecs, btrfs_bioset);
-		}
-	}
-
-	if (bio) {
-		bio->bi_bdev = bdev;
-		bio->bi_iter.bi_sector = first_sector;
-		btrfs_bio = btrfs_io_bio(bio);
-		btrfs_bio->csum = NULL;
-		btrfs_bio->csum_allocated = NULL;
-		btrfs_bio->end_io = NULL;
-	}
+	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_sector = first_byte >> 9;
+	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }
 
-struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
+struct bio *btrfs_bio_clone(struct bio *bio)
 {
 	struct btrfs_io_bio *btrfs_bio;
 	struct bio *new;
 
-	new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
-	if (new) {
-		btrfs_bio = btrfs_io_bio(new);
-		btrfs_bio->csum = NULL;
-		btrfs_bio->csum_allocated = NULL;
-		btrfs_bio->end_io = NULL;
-	}
+	/* Bio allocation backed by a bioset does not fail */
+	new = bio_clone_fast(bio, GFP_NOFS, btrfs_bioset);
+	btrfs_bio = btrfs_io_bio(new);
+	btrfs_io_bio_init(btrfs_bio);
+	btrfs_bio->iter = bio->bi_iter;
 	return new;
 }
 
-/* this also allocates from the btrfs_bioset */
-struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
 {
-	struct btrfs_io_bio *btrfs_bio;
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
-	if (bio) {
-		btrfs_bio = btrfs_io_bio(bio);
-		btrfs_bio->csum = NULL;
-		btrfs_bio->csum_allocated = NULL;
-		btrfs_bio->end_io = NULL;
-	}
+	/* Bio allocation backed by a bioset does not fail */
+	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, btrfs_bioset);
+	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }
 
+struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
+{
+	struct bio *bio;
+	struct btrfs_io_bio *btrfs_bio;
+
+	/* this will never fail when it's backed by a bioset */
+	bio = bio_clone_fast(orig, GFP_NOFS, btrfs_bioset);
+	ASSERT(bio);
+
+	btrfs_bio = btrfs_io_bio(bio);
+	btrfs_io_bio_init(btrfs_bio);
+
+	bio_trim(bio, offset >> 9, size >> 9);
+	btrfs_bio->iter = bio->bi_iter;
+	return bio;
+}
+
 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 				       unsigned long bio_flags)
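btrfs_io_bio_init() above relies on struct layout: all btrfs-private members of struct btrfs_io_bio sit in front of the embedded struct bio, so a single memset up to offsetof(..., bio) clears them without touching the bio that bio_alloc_bioset() already set up. A standalone illustration of the idiom (userspace C with stand-in types, not the kernel structs themselves):

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct inner {			/* stands in for struct bio */
	int refcount;
};

struct outer {			/* stands in for struct btrfs_io_bio */
	char *csum;
	char *csum_allocated;
	void (*end_io)(void);
	struct inner bio;	/* initialized elsewhere; must not be clobbered */
};

static void outer_init(struct outer *o)
{
	/* zero everything up to, but not including, 'bio' */
	memset(o, 0, offsetof(struct outer, bio));
}

int main(void)
{
	struct outer o;

	o.bio.refcount = 42;	/* pretend the allocator set this up */
	o.csum = (char *)&o;	/* garbage that outer_init() must clear */
	outer_init(&o);
	assert(o.csum == NULL && o.csum_allocated == NULL && o.end_io == NULL);
	assert(o.bio.refcount == 42);	/* embedded member untouched */
	return 0;
}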
@@ -2759,7 +2738,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 	bio_get(bio);
 
 	if (tree->ops)
-		ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
+		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
 					   mirror_num, bio_flags, start);
 	else
 		btrfsic_submit_bio(bio);
@@ -2822,11 +2801,7 @@ static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
 		}
 	}
 
-	bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
-			      GFP_NOFS | __GFP_HIGH);
-	if (!bio)
-		return -ENOMEM;
-
+	bio = btrfs_bio_alloc(bdev, sector << 9);
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
@@ -3762,7 +3737,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
 		 */
 		start = btrfs_item_nr_offset(nritems);
-		end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
+		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, eb);
 		memzero_extent_buffer(eb, start, end - start);
 	}
 
@@ -4468,29 +4443,25 @@ try_submit_last:
 }
 
 /*
- * Sanity check for fiemap cache
+ * Emit last fiemap cache
  *
- * All fiemap cache should be submitted by emit_fiemap_extent()
- * Iteration should be terminated either by last fiemap extent or
- * fieinfo->fi_extents_max.
- * So no cached fiemap should exist.
+ * The last fiemap cache may still be cached in the following case:
+ * 0		      4k		    8k
+ * |<- Fiemap range ->|
+ * |<------------ First extent ----------->|
+ *
+ * In this case, the first extent range will be cached but not emitted.
+ * So we must emit it before ending extent_fiemap().
  */
-static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
+static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
 			      struct fiemap_extent_info *fieinfo,
 			      struct fiemap_cache *cache)
 {
 	int ret;
 
 	if (!cache->cached)
 		return 0;
 
-	/* Small and recoverbale problem, only to info developer */
-#ifdef CONFIG_BTRFS_DEBUG
-	WARN_ON(1);
-#endif
-	btrfs_warn(fs_info,
-		   "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
-		   cache->offset, cache->phys, cache->len, cache->flags);
 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
 				      cache->len, cache->flags);
 	cache->cached = false;
@@ -4706,7 +4677,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 out_free:
 	if (!ret)
-		ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
+		ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
 	free_extent_map(em);
 out:
 	btrfs_free_path(path);
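The two fiemap hunks above change what happens when the requested range starts inside an extent: the first extent used to be cached and then warned about as "unhandled", now it is emitted before extent_fiemap() returns. The behavior is observable from userspace through the FIEMAP ioctl; a minimal sketch, assuming a file named "testfile" exists and offset 4096 falls inside one of its extents (the path and offsets are illustrative):

#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fiemap *fm;
	int fd = open("testfile", O_RDONLY);

	if (fd < 0)
		return 1;
	/* room for the header plus one returned extent record */
	fm = calloc(1, sizeof(*fm) + sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 4096;		/* query begins inside an extent */
	fm->fm_length = 4096;
	fm->fm_extent_count = 1;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0 && fm->fm_mapped_extents)
		printf("extent: logical=%llu len=%llu\n",
		       (unsigned long long)fm->fm_extents[0].fe_logical,
		       (unsigned long long)fm->fm_extents[0].fe_length);
	free(fm);
	close(fd);
	return 0;
}

With the fix, the straddling extent is reported here instead of being silently dropped.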
@@ -92,9 +92,9 @@ struct btrfs_inode;
 struct btrfs_io_bio;
 struct io_failure_record;
 
-typedef blk_status_t (extent_submit_bio_hook_t)(struct inode *inode,
-		struct bio *bio, int mirror_num, unsigned long bio_flags,
+typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
+		int mirror_num, unsigned long bio_flags,
 		u64 bio_offset);
 struct extent_io_ops {
 	/*
	 * The following callbacks must be allways defined, the function
@@ -108,32 +108,36 @@ struct extent_io_ops {
 			       size_t size, struct bio *bio,
 			       unsigned long bio_flags);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
+	struct btrfs_fs_info *(*tree_fs_info)(void *private_data);
+	void (*set_range_writeback)(void *private_data, u64 start, u64 end);
 
 	/*
 	 * Optional hooks, called if the pointer is not NULL
 	 */
-	int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
+	int (*fill_delalloc)(void *private_data, struct page *locked_page,
 			     u64 start, u64 end, int *page_started,
 			     unsigned long *nr_written);
 
 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
 				      struct extent_state *state, int uptodate);
-	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
+	void (*set_bit_hook)(void *private_data, struct extent_state *state,
 			     unsigned *bits);
-	void (*clear_bit_hook)(struct btrfs_inode *inode,
+	void (*clear_bit_hook)(void *private_data,
 			       struct extent_state *state,
 			       unsigned *bits);
-	void (*merge_extent_hook)(struct inode *inode,
+	void (*merge_extent_hook)(void *private_data,
 				  struct extent_state *new,
 				  struct extent_state *other);
-	void (*split_extent_hook)(struct inode *inode,
+	void (*split_extent_hook)(void *private_data,
 				  struct extent_state *orig, u64 split);
+	void (*check_extent_io_range)(void *private_data, const char *caller,
+				      u64 start, u64 end);
 };
 
 struct extent_io_tree {
 	struct rb_root state;
-	struct address_space *mapping;
+	void *private_data;
 	u64 dirty_bytes;
 	int track_uptodate;
 	spinlock_t lock;
@@ -205,12 +209,46 @@ struct extent_buffer {
  */
 struct extent_changeset {
 	/* How many bytes are set/cleared in this operation */
-	u64 bytes_changed;
+	unsigned int bytes_changed;
 
 	/* Changed ranges */
 	struct ulist range_changed;
 };
 
+static inline void extent_changeset_init(struct extent_changeset *changeset)
+{
+	changeset->bytes_changed = 0;
+	ulist_init(&changeset->range_changed);
+}
+
+static inline struct extent_changeset *extent_changeset_alloc(void)
+{
+	struct extent_changeset *ret;
+
+	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	extent_changeset_init(ret);
+	return ret;
+}
+
+static inline void extent_changeset_release(struct extent_changeset *changeset)
+{
+	if (!changeset)
+		return;
+	changeset->bytes_changed = 0;
+	ulist_release(&changeset->range_changed);
+}
+
+static inline void extent_changeset_free(struct extent_changeset *changeset)
+{
+	if (!changeset)
+		return;
+	extent_changeset_release(changeset);
+	kfree(changeset);
+}
+
 static inline void extent_set_compress_type(unsigned long *bio_flags,
 					    int compress_type)
 {
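The helpers above define the lifecycle that callers in this series follow: start with a NULL changeset pointer, let the reserve path allocate and fill it, release it between loop iterations so ranges are not double-recorded, and free it once at the end. A condensed, kernel-style sketch of that pattern, modeled on __btrfs_buffered_write() later in this diff (the enclosing function and copy_one_chunk() are illustrative only, not real code):

static int write_loop_sketch(struct inode *inode, u64 pos, u64 len)
{
	struct extent_changeset *data_reserved = NULL;
	int ret = 0;

	while (len > 0) {
		/* drop ranges recorded by the previous iteration */
		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(inode, &data_reserved,
						  pos, len);
		if (ret < 0)
			break;

		ret = copy_one_chunk(inode, pos, len);	/* hypothetical */
		if (ret < 0) {
			/* give back exactly the ranges recorded above */
			btrfs_free_reserved_data_space(inode, data_reserved,
						       pos, len);
			break;
		}
		pos += len;
		len = 0;
	}

	extent_changeset_free(data_reserved);
	return ret;
}

Threading the changeset through reserve and free is what fixes the qgroup reserved-space underflows listed in the shortlog: the free path no longer guesses which ranges were reserved, it is told.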
@@ -230,8 +268,7 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
 					  u64 start, u64 len,
 					  int create);
 
-void extent_io_tree_init(struct extent_io_tree *tree,
-			 struct address_space *mapping);
+void extent_io_tree_init(struct extent_io_tree *tree, void *private_data);
 int try_release_extent_mapping(struct extent_map_tree *map,
 			       struct extent_io_tree *tree, struct page *page,
 			       gfp_t mask);
@@ -459,20 +496,21 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				 u64 delalloc_end, struct page *locked_page,
 				 unsigned bits_to_clear,
 				 unsigned long page_ops);
-struct bio *
-btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
-		gfp_t gfp_flags);
-struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
-struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
+struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
+struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
+struct bio *btrfs_bio_clone(struct bio *bio);
+struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);
 
 struct btrfs_fs_info;
 struct btrfs_inode;
 
-int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
-		      u64 logical, struct page *page,
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
+		      u64 length, u64 logical, struct page *page,
 		      unsigned int pg_offset, int mirror_num);
-int clean_io_failure(struct btrfs_inode *inode, u64 start,
-		     struct page *page, unsigned int pg_offset);
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+		     struct extent_io_tree *failure_tree,
+		     struct extent_io_tree *io_tree, u64 start,
+		     struct page *page, u64 ino, unsigned int pg_offset);
 void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *eb, int mirror_num);
@@ -507,7 +545,9 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 				    struct io_failure_record *failrec,
 				    struct page *page, int pg_offset, int icsum,
 				    bio_end_io_t *endio_func, void *data);
-int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec);
+int free_io_failure(struct extent_io_tree *failure_tree,
+		    struct extent_io_tree *io_tree,
+		    struct io_failure_record *rec);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 noinline u64 find_lock_delalloc_range(struct inode *inode,
 				      struct extent_io_tree *tree,
@@ -164,7 +164,8 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 					    u64 logical_offset, u32 *dst, int dio)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
 	struct btrfs_csum_item *item = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -177,7 +178,7 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	u64 page_bytes_left;
 	u32 diff;
 	int nblocks;
-	int count = 0, i;
+	int count = 0;
 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
 	path = btrfs_alloc_path();
@@ -206,8 +207,6 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
 		path->reada = READA_FORWARD;
 
-	WARN_ON(bio->bi_vcnt <= 0);
-
 	/*
 	 * the free space stuff is only read when it hasn't been
 	 * updated in the current transaction. So, we can safely
@@ -223,13 +222,13 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	if (dio)
 		offset = logical_offset;
 
-	bio_for_each_segment_all(bvec, bio, i) {
-		page_bytes_left = bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		page_bytes_left = bvec.bv_len;
 		if (count)
 			goto next;
 
 		if (!dio)
-			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
 		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
 					       (u32 *)csum, nblocks);
 		if (count)
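The conversion above from bio_for_each_segment_all() to bio_for_each_segment() is what makes checksum lookup safe on cloned bios: the _all variant walks the bio's own bvec array by index and only reflects how the bio was built, while the iter-based variant honours bio->bi_iter and therefore sees only the portion of a shared bvec array that a fast clone (such as one returned by btrfs_bio_clone()) actually covers. A kernel-style sketch of the two forms as they existed around 4.13 (illustrative helpers, not part of this patch):

#include <linux/bio.h>

static unsigned int count_bytes_owner_only(struct bio *bio)
{
	struct bio_vec *bvec;	/* pointer into the bio's bvec array */
	unsigned int bytes = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i)	/* creator's view only */
		bytes += bvec->bv_len;
	return bytes;
}

static unsigned int count_bytes_clone_safe(struct bio *bio)
{
	struct bio_vec bvec;	/* on-stack copy, filled per segment */
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bvec, bio, iter)	/* honours bio->bi_iter */
		bytes += bvec.bv_len;
	return bytes;
}

Dropping the WARN_ON(bio->bi_vcnt <= 0) checks follows from the same change: bi_vcnt describes the owner's array, which an iterator-based walk no longer depends on.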
@@ -440,15 +439,15 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	struct btrfs_ordered_sum *sums;
 	struct btrfs_ordered_extent *ordered = NULL;
 	char *data;
-	struct bio_vec *bvec;
+	struct bvec_iter iter;
+	struct bio_vec bvec;
 	int index;
 	int nr_sectors;
-	int i, j;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
+	int i;
 	u64 offset;
 
-	WARN_ON(bio->bi_vcnt <= 0);
 	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
 		       GFP_NOFS);
 	if (!sums)
@@ -465,19 +464,19 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	index = 0;
 
-	bio_for_each_segment_all(bvec, bio, j) {
+	bio_for_each_segment(bvec, bio, iter) {
 		if (!contig)
-			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
 
 		if (!ordered) {
 			ordered = btrfs_lookup_ordered_extent(inode, offset);
 			BUG_ON(!ordered); /* Logic error */
 		}
 
-		data = kmap_atomic(bvec->bv_page);
+		data = kmap_atomic(bvec.bv_page);
 
 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
-						 bvec->bv_len + fs_info->sectorsize
+						 bvec.bv_len + fs_info->sectorsize
 						 - 1);
 
 		for (i = 0; i < nr_sectors; i++) {
@@ -504,12 +503,12 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 						+ total_bytes;
 				index = 0;
 
-				data = kmap_atomic(bvec->bv_page);
+				data = kmap_atomic(bvec.bv_page);
 			}
 
 			sums->sums[index] = ~(u32)0;
 			sums->sums[index]
-				= btrfs_csum_data(data + bvec->bv_offset
+				= btrfs_csum_data(data + bvec.bv_offset
 						+ (i * fs_info->sectorsize),
 						sums->sums[index],
 						fs_info->sectorsize);
@@ -1581,6 +1581,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page **pages = NULL;
 	struct extent_state *cached_state = NULL;
+	struct extent_changeset *data_reserved = NULL;
 	u64 release_bytes = 0;
 	u64 lockstart;
 	u64 lockend;
@@ -1628,7 +1629,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		reserve_bytes = round_up(write_bytes + sector_offset,
 				fs_info->sectorsize);
 
-		ret = btrfs_check_data_free_space(inode, pos, write_bytes);
+		extent_changeset_release(data_reserved);
+		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
+						  write_bytes);
 		if (ret < 0) {
 			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 						      BTRFS_INODE_PREALLOC)) &&
@@ -1657,8 +1660,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 						    reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
-				btrfs_free_reserved_data_space(inode, pos,
-							       write_bytes);
+				btrfs_free_reserved_data_space(inode,
+						data_reserved, pos,
+						write_bytes);
 			else
 				btrfs_end_write_no_snapshoting(root);
 			break;
|
@ -1740,8 +1744,9 @@ again:
|
||||||
__pos = round_down(pos,
|
__pos = round_down(pos,
|
||||||
fs_info->sectorsize) +
|
fs_info->sectorsize) +
|
||||||
(dirty_pages << PAGE_SHIFT);
|
(dirty_pages << PAGE_SHIFT);
|
||||||
btrfs_delalloc_release_space(inode, __pos,
|
btrfs_delalloc_release_space(inode,
|
||||||
release_bytes);
|
data_reserved, __pos,
|
||||||
|
release_bytes);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1796,12 +1801,13 @@ again:
|
||||||
btrfs_delalloc_release_metadata(BTRFS_I(inode),
|
btrfs_delalloc_release_metadata(BTRFS_I(inode),
|
||||||
release_bytes);
|
release_bytes);
|
||||||
} else {
|
} else {
|
||||||
btrfs_delalloc_release_space(inode,
|
btrfs_delalloc_release_space(inode, data_reserved,
|
||||||
round_down(pos, fs_info->sectorsize),
|
round_down(pos, fs_info->sectorsize),
|
||||||
release_bytes);
|
release_bytes);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
extent_changeset_free(data_reserved);
|
||||||
return num_written ? num_written : ret;
|
return num_written ? num_written : ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2405,10 +2411,13 @@ out:
|
||||||
*/
|
*/
|
||||||
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
|
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
|
||||||
{
|
{
|
||||||
|
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||||
struct extent_map *em;
|
struct extent_map *em;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0);
|
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
|
||||||
|
round_down(*start, fs_info->sectorsize),
|
||||||
|
round_up(*len, fs_info->sectorsize), 0);
|
||||||
if (IS_ERR(em))
|
if (IS_ERR(em))
|
||||||
return PTR_ERR(em);
|
return PTR_ERR(em);
|
||||||
|
|
||||||
|
@ -2784,6 +2793,7 @@ static long btrfs_fallocate(struct file *file, int mode,
|
||||||
{
|
{
|
||||||
struct inode *inode = file_inode(file);
|
struct inode *inode = file_inode(file);
|
||||||
struct extent_state *cached_state = NULL;
|
struct extent_state *cached_state = NULL;
|
||||||
|
struct extent_changeset *data_reserved = NULL;
|
||||||
struct falloc_range *range;
|
struct falloc_range *range;
|
||||||
struct falloc_range *tmp;
|
struct falloc_range *tmp;
|
||||||
struct list_head reserve_list;
|
struct list_head reserve_list;
|
||||||
|
@ -2913,8 +2923,8 @@ static long btrfs_fallocate(struct file *file, int mode,
|
||||||
free_extent_map(em);
|
free_extent_map(em);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
ret = btrfs_qgroup_reserve_data(inode, cur_offset,
|
ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
|
||||||
last_byte - cur_offset);
|
cur_offset, last_byte - cur_offset);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
free_extent_map(em);
|
free_extent_map(em);
|
||||||
break;
|
break;
|
||||||
|
@ -2925,8 +2935,8 @@ static long btrfs_fallocate(struct file *file, int mode,
|
||||||
* range, free reserved data space first, otherwise
|
* range, free reserved data space first, otherwise
|
||||||
* it'll result in false ENOSPC error.
|
* it'll result in false ENOSPC error.
|
||||||
*/
|
*/
|
||||||
btrfs_free_reserved_data_space(inode, cur_offset,
|
btrfs_free_reserved_data_space(inode, data_reserved,
|
||||||
last_byte - cur_offset);
|
cur_offset, last_byte - cur_offset);
|
||||||
}
|
}
|
||||||
free_extent_map(em);
|
free_extent_map(em);
|
||||||
cur_offset = last_byte;
|
cur_offset = last_byte;
|
||||||
|
@ -2945,8 +2955,9 @@ static long btrfs_fallocate(struct file *file, int mode,
|
||||||
range->len, i_blocksize(inode),
|
range->len, i_blocksize(inode),
|
||||||
offset + len, &alloc_hint);
|
offset + len, &alloc_hint);
|
||||||
else
|
else
|
||||||
btrfs_free_reserved_data_space(inode, range->start,
|
btrfs_free_reserved_data_space(inode,
|
||||||
range->len);
|
data_reserved, range->start,
|
||||||
|
range->len);
|
||||||
list_del(&range->list);
|
list_del(&range->list);
|
||||||
kfree(range);
|
kfree(range);
|
||||||
}
|
}
|
||||||
|
@ -2984,8 +2995,9 @@ out:
|
||||||
inode_unlock(inode);
|
inode_unlock(inode);
|
||||||
/* Let go of our reservation. */
|
/* Let go of our reservation. */
|
||||||
if (ret != 0)
|
if (ret != 0)
|
||||||
btrfs_free_reserved_data_space(inode, alloc_start,
|
btrfs_free_reserved_data_space(inode, data_reserved,
|
||||||
alloc_end - cur_offset);
|
alloc_start, alloc_end - cur_offset);
|
||||||
|
extent_changeset_free(data_reserved);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
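All the call sites converted above follow the same lifecycle for the new extent_changeset argument: release the recorded ranges before each re-reservation, free the changeset itself once on exit. A hedged caller-side sketch (the loop body and chunk size are illustrative, not the real write path):

    static int reserve_loop_sketch(struct inode *inode, u64 pos, u64 len)
    {
            struct extent_changeset *data_reserved = NULL;
            int ret = 0;

            while (len > 0) {
                    u64 chunk = min_t(u64, len, SZ_128K);

                    /* Drop the ranges recorded on the previous iteration. */
                    extent_changeset_release(data_reserved);
                    /* Reserve; *data_reserved is allocated on first use. */
                    ret = btrfs_check_data_free_space(inode, &data_reserved,
                                                      pos, chunk);
                    if (ret < 0)
                            break;
                    /*
                     * On a later failure, only the ranges recorded for this
                     * chunk are handed back:
                     * btrfs_free_reserved_data_space(inode, data_reserved,
                     *                                pos, chunk);
                     */
                    pos += chunk;
                    len -= chunk;
            }
            /* Free the changeset itself exactly once, on the way out. */
            extent_changeset_free(data_reserved);
            return ret;
    }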
@@ -17,7 +17,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "locking.h"
@@ -153,21 +153,21 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
 
 static u8 *alloc_bitmap(u32 bitmap_size)
 {
-        void *mem;
+        u8 *ret;
+        unsigned int nofs_flag;
 
         /*
-         * The allocation size varies, observed numbers were < 4K up to 16K.
-         * Using vmalloc unconditionally would be too heavy, we'll try
-         * contiguous allocations first.
+         * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
+         * into the filesystem as the free space bitmap can be modified in the
+         * critical section of a transaction commit.
+         *
+         * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
+         * know that recursion is unsafe.
          */
-        if (bitmap_size <= PAGE_SIZE)
-                return kzalloc(bitmap_size, GFP_NOFS);
-
-        mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
-        if (mem)
-                return mem;
-
-        return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
+        nofs_flag = memalloc_nofs_save();
+        ret = kvzalloc(bitmap_size, GFP_KERNEL);
+        memalloc_nofs_restore(nofs_flag);
+        return ret;
 }
 
 int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
@@ -1188,11 +1188,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
         btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
         clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 
-        ret = btrfs_commit_transaction(trans);
-        if (ret)
-                return ret;
-
-        return 0;
+        return btrfs_commit_transaction(trans);
 
 abort:
         clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
@@ -1277,11 +1273,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
         free_extent_buffer(free_space_root->commit_root);
         kfree(free_space_root);
 
-        ret = btrfs_commit_transaction(trans);
-        if (ret)
-                return ret;
-
-        return 0;
+        return btrfs_commit_transaction(trans);
 
 abort:
         btrfs_abort_transaction(trans, ret);
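The alloc_bitmap() rewrite above is an instance of the scoped-NOFS pattern: kvmalloc() cannot honour GFP_NOFS (its vmalloc fallback ignores it), so the whole scope is marked NOFS and the allocation itself uses GFP_KERNEL. The pattern in isolation (the size parameter and wrapper name are illustrative):

    #include <linux/sched/mm.h>
    #include <linux/mm.h>

    static void *nofs_kvzalloc(size_t size)
    {
            unsigned int nofs_flag;
            void *p;

            /* Any allocation inside the scope implicitly drops __GFP_FS. */
            nofs_flag = memalloc_nofs_save();
            p = kvzalloc(size, GFP_KERNEL);
            memalloc_nofs_restore(nofs_flag);
            return p;
    }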
@@ -400,6 +400,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
         struct btrfs_path *path;
         struct inode *inode;
         struct btrfs_block_rsv *rsv;
+        struct extent_changeset *data_reserved = NULL;
         u64 num_bytes;
         u64 alloc_hint = 0;
         int ret;
@@ -492,7 +493,7 @@ again:
         /* Just to make sure we have enough space */
         prealloc += 8 * PAGE_SIZE;
 
-        ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
+        ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 0, prealloc);
         if (ret)
                 goto out_put;
 
@@ -516,6 +517,7 @@ out:
         trans->bytes_reserved = num_bytes;
 
         btrfs_free_path(path);
+        extent_changeset_free(data_reserved);
         return ret;
 }
fs/btrfs/inode.c: 457 changes (diff suppressed because it is too large)
@@ -37,7 +37,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/security.h>
 #include <linux/xattr.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/uuid.h>
@@ -689,7 +689,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
         if (ret)
                 goto dec_and_free;
 
-        btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);
+        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 
         btrfs_init_block_rsv(&pending_snapshot->block_rsv,
                              BTRFS_BLOCK_RSV_TEMP);
@@ -1127,6 +1127,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
         struct btrfs_ordered_extent *ordered;
         struct extent_state *cached_state = NULL;
         struct extent_io_tree *tree;
+        struct extent_changeset *data_reserved = NULL;
         gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
         file_end = (isize - 1) >> PAGE_SHIFT;
@@ -1135,7 +1136,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
 
         page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
 
-        ret = btrfs_delalloc_reserve_space(inode,
+        ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
                         start_index << PAGE_SHIFT,
                         page_cnt << PAGE_SHIFT);
         if (ret)
@@ -1226,7 +1227,7 @@ again:
                 spin_lock(&BTRFS_I(inode)->lock);
                 BTRFS_I(inode)->outstanding_extents++;
                 spin_unlock(&BTRFS_I(inode)->lock);
-                btrfs_delalloc_release_space(inode,
+                btrfs_delalloc_release_space(inode, data_reserved,
                                 start_index << PAGE_SHIFT,
                                 (page_cnt - i_done) << PAGE_SHIFT);
         }
@@ -1247,15 +1248,17 @@ again:
                 unlock_page(pages[i]);
                 put_page(pages[i]);
         }
+        extent_changeset_free(data_reserved);
         return i_done;
 out:
         for (i = 0; i < i_done; i++) {
                 unlock_page(pages[i]);
                 put_page(pages[i]);
         }
-        btrfs_delalloc_release_space(inode,
+        btrfs_delalloc_release_space(inode, data_reserved,
                         start_index << PAGE_SHIFT,
                         page_cnt << PAGE_SHIFT);
+        extent_changeset_free(data_reserved);
         return ret;
 
 }
@@ -4588,7 +4591,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
 
 out:
         btrfs_free_path(path);
-        vfree(inodes);
+        kvfree(inodes);
         kfree(loi);
 
         return ret;
@@ -4897,7 +4900,6 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
                 goto out;
         }
 
-        /* FIXME: check if the IDs really exist */
         if (sa->assign) {
                 ret = btrfs_add_qgroup_relation(trans, fs_info,
                                                 sa->src, sa->dst);
@@ -4956,7 +4958,6 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
                 goto out;
         }
 
-        /* FIXME: check if the IDs really exist */
         if (sa->create) {
                 ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
         } else {
@@ -5010,7 +5011,6 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
                 qgroupid = root->root_key.objectid;
         }
 
-        /* FIXME: check if the IDs really exist */
         ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
 
         err = btrfs_end_transaction(trans);
@@ -18,13 +18,14 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/pagemap.h>
 #include <linux/bio.h>
 #include <linux/lzo.h>
+#include <linux/refcount.h>
 #include "compression.h"
 
 #define LZO_LEN 4
@@ -40,9 +41,9 @@ static void lzo_free_workspace(struct list_head *ws)
 {
         struct workspace *workspace = list_entry(ws, struct workspace, list);
 
-        vfree(workspace->buf);
-        vfree(workspace->cbuf);
-        vfree(workspace->mem);
+        kvfree(workspace->buf);
+        kvfree(workspace->cbuf);
+        kvfree(workspace->mem);
         kfree(workspace);
 }
 
@@ -50,13 +51,13 @@ static struct list_head *lzo_alloc_workspace(void)
 {
         struct workspace *workspace;
 
-        workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
+        workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
         if (!workspace)
                 return ERR_PTR(-ENOMEM);
 
-        workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
-        workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
-        workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
+        workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+        workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
+        workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
         if (!workspace->mem || !workspace->buf || !workspace->cbuf)
                 goto fail;
 
@@ -141,7 +142,7 @@ static int lzo_compress_pages(struct list_head *ws,
                 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
                                        &out_len, workspace->mem);
                 if (ret != LZO_E_OK) {
-                        pr_debug("BTRFS: deflate in loop returned %d\n",
+                        pr_debug("BTRFS: lzo in loop returned %d\n",
                                ret);
                         ret = -EIO;
                         goto out;
@@ -229,8 +230,10 @@ static int lzo_compress_pages(struct list_head *ws,
                 in_len = min(bytes_left, PAGE_SIZE);
         }
 
-        if (tot_out > tot_in)
+        if (tot_out >= tot_in) {
+                ret = -E2BIG;
                 goto out;
+        }
 
         /* store the size of all chunks of compressed data */
         cpage_out = kmap(pages[0]);
@@ -254,16 +257,13 @@ out:
         return ret;
 }
 
-static int lzo_decompress_bio(struct list_head *ws,
-                              struct page **pages_in,
-                              u64 disk_start,
-                              struct bio *orig_bio,
-                              size_t srclen)
+static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 {
         struct workspace *workspace = list_entry(ws, struct workspace, list);
         int ret = 0, ret2;
         char *data_in;
         unsigned long page_in_index = 0;
+        size_t srclen = cb->compressed_len;
         unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
         unsigned long buf_start;
         unsigned long buf_offset = 0;
@@ -278,6 +278,9 @@ static int lzo_decompress_bio(struct list_head *ws,
         unsigned long tot_len;
         char *buf;
         bool may_late_unmap, need_unmap;
+        struct page **pages_in = cb->compressed_pages;
+        u64 disk_start = cb->start;
+        struct bio *orig_bio = cb->orig_bio;
 
         data_in = kmap(pages_in[0]);
         tot_len = read_compress_length(data_in);
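Two behavioural notes on the lzo hunks: the ratio check tightens from '>' to '>=' (output exactly as large as the input saves nothing), and the failure is now reported as -E2BIG, which the compression path treats as "store this extent uncompressed". Restated as a standalone predicate (a sketch, not the in-tree helper):

    /* Returns 0 when the compressed result is worth keeping. */
    static int compressed_result_usable(unsigned long tot_in,
                                        unsigned long tot_out)
    {
            if (tot_out >= tot_in)
                    return -E2BIG;  /* no gain: fall back to raw data */
            return 0;
    }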
@@ -663,7 +663,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
  * wait for all the ordered extents in a root. This is done when balancing
  * space between drives.
  */
-int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
+u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                                const u64 range_start, const u64 range_len)
 {
         struct btrfs_fs_info *fs_info = root->fs_info;
@@ -671,7 +671,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
         LIST_HEAD(skipped);
         LIST_HEAD(works);
         struct btrfs_ordered_extent *ordered, *next;
-        int count = 0;
+        u64 count = 0;
         const u64 range_end = range_start + range_len;
 
         mutex_lock(&root->ordered_extent_mutex);
@@ -701,7 +701,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 
                 cond_resched();
                 spin_lock(&root->ordered_extent_lock);
-                if (nr != -1)
+                if (nr != U64_MAX)
                         nr--;
                 count++;
         }
@@ -720,13 +720,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
         return count;
 }
 
-int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len)
 {
         struct btrfs_root *root;
         struct list_head splice;
-        int done;
-        int total_done = 0;
+        u64 total_done = 0;
+        u64 done;
 
         INIT_LIST_HEAD(&splice);
 
@@ -748,9 +748,8 @@ int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                 total_done += done;
 
                 spin_lock(&fs_info->ordered_root_lock);
-                if (nr != -1) {
+                if (nr != U64_MAX) {
                         nr -= done;
-                        WARN_ON(nr < 0);
                 }
         }
         list_splice_tail(&splice, &fs_info->ordered_roots);
@@ -200,9 +200,9 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                            struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                            u32 *sum, int len);
-int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
+u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                                const u64 range_start, const u64 range_len);
-int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len);
 void btrfs_get_logged_extents(struct btrfs_inode *inode,
                               struct list_head *logged_list,
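The int -> u64 conversion above changes the "wait for everything" sentinel from -1 to U64_MAX, and drops WARN_ON(nr < 0), which can never fire on an unsigned type. Caller-side usage under the new signatures (a sketch; the counts are illustrative):

            u64 done;

            /* Wait for every ordered extent in the whole range. */
            done = btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

            /* Wait for at most 32 of them. */
            done = btrfs_wait_ordered_extents(root, 32, 0, (u64)-1);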
@@ -261,8 +261,11 @@ void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
                 case BTRFS_BLOCK_GROUP_ITEM_KEY:
                         bi = btrfs_item_ptr(l, i,
                                             struct btrfs_block_group_item);
-                        pr_info("\t\tblock group used %llu\n",
-                               btrfs_disk_block_group_used(l, bi));
+                        pr_info(
+                   "\t\tblock group used %llu chunk_objectid %llu flags %llu\n",
+                                btrfs_disk_block_group_used(l, bi),
+                                btrfs_disk_block_group_chunk_objectid(l, bi),
+                                btrfs_disk_block_group_flags(l, bi));
                         break;
                 case BTRFS_CHUNK_ITEM_KEY:
                         print_chunk(l, btrfs_item_ptr(l, i,
@@ -164,6 +164,7 @@ static int iterate_object_props(struct btrfs_root *root,
                                               size_t),
                                void *ctx)
 {
+        struct btrfs_fs_info *fs_info = root->fs_info;
         int ret;
         char *name_buf = NULL;
         char *value_buf = NULL;
@@ -214,6 +215,12 @@ static int iterate_object_props(struct btrfs_root *root,
                 name_ptr = (unsigned long)(di + 1);
                 data_ptr = name_ptr + name_len;
 
+                if (verify_dir_item(fs_info, leaf,
+                                    path->slots[0], di)) {
+                        ret = -EIO;
+                        goto out;
+                }
+
                 if (name_len <= XATTR_BTRFS_PREFIX_LEN ||
                     memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX,
                                          name_ptr,
@@ -1406,38 +1406,6 @@ out:
         return ret;
 }
 
-int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
-                                         struct btrfs_fs_info *fs_info)
-{
-        struct btrfs_qgroup_extent_record *record;
-        struct btrfs_delayed_ref_root *delayed_refs;
-        struct rb_node *node;
-        u64 qgroup_to_skip;
-        int ret = 0;
-
-        delayed_refs = &trans->transaction->delayed_refs;
-        qgroup_to_skip = delayed_refs->qgroup_to_skip;
-
-        /*
-         * No need to do lock, since this function will only be called in
-         * btrfs_commit_transaction().
-         */
-        node = rb_first(&delayed_refs->dirty_extent_root);
-        while (node) {
-                record = rb_entry(node, struct btrfs_qgroup_extent_record,
-                                  node);
-                if (WARN_ON(!record->old_roots))
-                        ret = btrfs_find_all_roots(NULL, fs_info,
-                                        record->bytenr, 0, &record->old_roots);
-                if (ret < 0)
-                        break;
-                if (qgroup_to_skip)
-                        ulist_del(record->old_roots, qgroup_to_skip, 0);
-                node = rb_next(node);
-        }
-        return ret;
-}
-
 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
                                 struct btrfs_delayed_ref_root *delayed_refs,
                                 struct btrfs_qgroup_extent_record *record)
@@ -1559,6 +1527,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
                 if (ret)
                         return ret;
         }
+        cond_resched();
         return 0;
 }
 
@@ -1918,6 +1887,35 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
         return 0;
 }
 
+/*
+ * Check if the @roots potentially is a list of fs tree roots
+ *
+ * Return 0 for definitely not a fs/subvol tree roots ulist
+ * Return 1 for possible fs/subvol tree roots in the list (considering an empty
+ * one as well)
+ */
+static int maybe_fs_roots(struct ulist *roots)
+{
+        struct ulist_node *unode;
+        struct ulist_iterator uiter;
+
+        /* Empty one, still possible for fs roots */
+        if (!roots || roots->nnodes == 0)
+                return 1;
+
+        ULIST_ITER_INIT(&uiter);
+        unode = ulist_next(roots, &uiter);
+        if (!unode)
+                return 1;
+
+        /*
+         * If it contains fs tree roots, then it must belong to fs/subvol
+         * trees.
+         * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
+         */
+        return is_fstree(unode->val);
+}
+
 int
 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info,
@@ -1934,10 +1932,20 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                 return 0;
 
-        if (new_roots)
+        if (new_roots) {
+                if (!maybe_fs_roots(new_roots))
+                        goto out_free;
                 nr_new_roots = new_roots->nnodes;
-        if (old_roots)
+        }
+        if (old_roots) {
+                if (!maybe_fs_roots(old_roots))
+                        goto out_free;
                 nr_old_roots = old_roots->nnodes;
+        }
+
+        /* Quick exit, either not fs tree roots, or won't affect any qgroup */
+        if (nr_old_roots == 0 && nr_new_roots == 0)
+                goto out_free;
 
         BUG_ON(!fs_info->quota_root);
 
@@ -2016,6 +2024,19 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
                 trace_btrfs_qgroup_account_extents(fs_info, record);
 
                 if (!ret) {
+                        /*
+                         * Old roots should be searched when inserting qgroup
+                         * extent record
+                         */
+                        if (WARN_ON(!record->old_roots)) {
+                                /* Search commit root to find old_roots */
+                                ret = btrfs_find_all_roots(NULL, fs_info,
+                                                record->bytenr, 0,
+                                                &record->old_roots);
+                                if (ret < 0)
+                                        goto cleanup;
+                        }
+
                         /*
                          * Use SEQ_LAST as time_seq to do special search, which
                          * doesn't lock tree or delayed_refs and search current
@@ -2025,8 +2046,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
                                         record->bytenr, SEQ_LAST, &new_roots);
                         if (ret < 0)
                                 goto cleanup;
-                        if (qgroup_to_skip)
+                        if (qgroup_to_skip) {
                                 ulist_del(new_roots, qgroup_to_skip, 0);
+                                ulist_del(record->old_roots, qgroup_to_skip,
+                                          0);
+                        }
                         ret = btrfs_qgroup_account_extent(trans, fs_info,
                                         record->bytenr, record->num_bytes,
                                         record->old_roots, new_roots);
@@ -2338,6 +2362,11 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
 
         if (num_bytes == 0)
                 return 0;
+
+        if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
+            capable(CAP_SYS_RESOURCE))
+                enforce = false;
+
 retry:
         spin_lock(&fs_info->qgroup_lock);
         quota_root = fs_info->quota_root;
@@ -2376,7 +2405,7 @@ retry:
                 ret = btrfs_start_delalloc_inodes(root, 0);
                 if (ret)
                         return ret;
-                btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);
+                btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
                 trans = btrfs_join_transaction(root);
                 if (IS_ERR(trans))
                         return PTR_ERR(trans);
@@ -2806,55 +2835,130 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
  * Return <0 for error (including -EQUOT)
  *
  * NOTE: this function may sleep for memory allocation.
+ *       if btrfs_qgroup_reserve_data() is called multiple times with
+ *       same @reserved, caller must ensure when error happens it's OK
+ *       to free *ALL* reserved space.
  */
-int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
+int btrfs_qgroup_reserve_data(struct inode *inode,
+                        struct extent_changeset **reserved_ret, u64 start,
+                        u64 len)
 {
         struct btrfs_root *root = BTRFS_I(inode)->root;
-        struct extent_changeset changeset;
         struct ulist_node *unode;
         struct ulist_iterator uiter;
+        struct extent_changeset *reserved;
+        u64 orig_reserved;
+        u64 to_reserve;
         int ret;
 
         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
             !is_fstree(root->objectid) || len == 0)
                 return 0;
 
-        changeset.bytes_changed = 0;
-        ulist_init(&changeset.range_changed);
+        /* @reserved parameter is mandatory for qgroup */
+        if (WARN_ON(!reserved_ret))
+                return -EINVAL;
+        if (!*reserved_ret) {
+                *reserved_ret = extent_changeset_alloc();
+                if (!*reserved_ret)
+                        return -ENOMEM;
+        }
+        reserved = *reserved_ret;
+        /* Record already reserved space */
+        orig_reserved = reserved->bytes_changed;
         ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-                        start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
+                        start + len -1, EXTENT_QGROUP_RESERVED, reserved);
+
+        /* Newly reserved space */
+        to_reserve = reserved->bytes_changed - orig_reserved;
         trace_btrfs_qgroup_reserve_data(inode, start, len,
-                                        changeset.bytes_changed,
-                                        QGROUP_RESERVE);
+                                        to_reserve, QGROUP_RESERVE);
         if (ret < 0)
                 goto cleanup;
-        ret = qgroup_reserve(root, changeset.bytes_changed, true);
+        ret = qgroup_reserve(root, to_reserve, true);
         if (ret < 0)
                 goto cleanup;
 
-        ulist_release(&changeset.range_changed);
         return ret;
 
 cleanup:
-        /* cleanup already reserved ranges */
+        /* cleanup *ALL* already reserved ranges */
         ULIST_ITER_INIT(&uiter);
-        while ((unode = ulist_next(&changeset.range_changed, &uiter)))
+        while ((unode = ulist_next(&reserved->range_changed, &uiter)))
                 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
                                  unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
                                  GFP_NOFS);
-        ulist_release(&changeset.range_changed);
+        extent_changeset_release(reserved);
         return ret;
 }
 
-static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
-                                       int free)
+/* Free ranges specified by @reserved, normally in error path */
+static int qgroup_free_reserved_data(struct inode *inode,
+                        struct extent_changeset *reserved, u64 start, u64 len)
+{
+        struct btrfs_root *root = BTRFS_I(inode)->root;
+        struct ulist_node *unode;
+        struct ulist_iterator uiter;
+        struct extent_changeset changeset;
+        int freed = 0;
+        int ret;
+
+        extent_changeset_init(&changeset);
+        len = round_up(start + len, root->fs_info->sectorsize);
+        start = round_down(start, root->fs_info->sectorsize);
+
+        ULIST_ITER_INIT(&uiter);
+        while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
+                u64 range_start = unode->val;
+                /* unode->aux is the inclusive end */
+                u64 range_len = unode->aux - range_start + 1;
+                u64 free_start;
+                u64 free_len;
+
+                extent_changeset_release(&changeset);
+
+                /* Only free range in range [start, start + len) */
+                if (range_start >= start + len ||
+                    range_start + range_len <= start)
+                        continue;
+                free_start = max(range_start, start);
+                free_len = min(start + len, range_start + range_len) -
+                           free_start;
+                /*
+                 * TODO: To also modify reserved->ranges_reserved to reflect
+                 * the modification.
+                 *
+                 * However as long as we free qgroup reserved according to
+                 * EXTENT_QGROUP_RESERVED, we won't double free.
+                 * So not need to rush.
+                 */
+                ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+                                free_start, free_start + free_len - 1,
+                                EXTENT_QGROUP_RESERVED, &changeset);
+                if (ret < 0)
+                        goto out;
+                freed += changeset.bytes_changed;
+        }
+        btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed);
+        ret = freed;
+out:
+        extent_changeset_release(&changeset);
+        return ret;
+}
+
+static int __btrfs_qgroup_release_data(struct inode *inode,
+                        struct extent_changeset *reserved, u64 start, u64 len,
+                        int free)
 {
         struct extent_changeset changeset;
         int trace_op = QGROUP_RELEASE;
         int ret;
 
-        changeset.bytes_changed = 0;
-        ulist_init(&changeset.range_changed);
+        /* In release case, we shouldn't have @reserved */
+        WARN_ON(!free && reserved);
+        if (free && reserved)
+                return qgroup_free_reserved_data(inode, reserved, start, len);
+        extent_changeset_init(&changeset);
         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
                         start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
         if (ret < 0)
@@ -2868,8 +2972,9 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
         btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
                                   BTRFS_I(inode)->root->objectid,
                                   changeset.bytes_changed);
+        ret = changeset.bytes_changed;
 out:
-        ulist_release(&changeset.range_changed);
+        extent_changeset_release(&changeset);
         return ret;
 }
 
@@ -2878,14 +2983,17 @@ out:
  *
  * Should be called when a range of pages get invalidated before reaching disk.
  * Or for error cleanup case.
+ * if @reserved is given, only reserved range in [@start, @start + @len) will
+ * be freed.
  *
  * For data written to disk, use btrfs_qgroup_release_data().
  *
  * NOTE: This function may sleep for memory allocation.
  */
-int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
+int btrfs_qgroup_free_data(struct inode *inode,
+                        struct extent_changeset *reserved, u64 start, u64 len)
 {
-        return __btrfs_qgroup_release_data(inode, start, len, 1);
+        return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
 }
 
 /*
@@ -2905,7 +3013,7 @@ int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
  */
 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
 {
-        return __btrfs_qgroup_release_data(inode, start, len, 0);
+        return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
 }
 
 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
@@ -2969,8 +3077,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
         struct ulist_iterator iter;
         int ret;
 
-        changeset.bytes_changed = 0;
-        ulist_init(&changeset.range_changed);
+        extent_changeset_init(&changeset);
         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                         EXTENT_QGROUP_RESERVED, &changeset);
 
@@ -2987,5 +3094,5 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
                                 changeset.bytes_changed);
 
         }
-        ulist_release(&changeset.range_changed);
+        extent_changeset_release(&changeset);
 }
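The range clamping in qgroup_free_reserved_data() above intersects each recorded range with the half-open interval [start, start + len). A worked example with concrete numbers (the values are illustrative):

    /*
     * reserved range: [16K, 48K)  -> range_start = 16K, range_len = 32K
     * range to free:  [32K, 64K)  -> start = 32K, len = 32K
     *
     * disjoint test:  16K >= 64K? no;  48K <= 32K? no  -> overlap exists
     * free_start = max(16K, 32K)               = 32K
     * free_len   = min(64K, 48K) - free_start  = 16K
     *
     * so only [32K, 48K) is cleared and counted toward 'freed'.
     */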
@@ -134,8 +134,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
 struct btrfs_delayed_extent_op;
-int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
-                                         struct btrfs_fs_info *fs_info);
 /*
  * Inform qgroup to trace one dirty extent, its info is recorded in @record.
  * So qgroup can account it at transaction committing time.
@@ -243,9 +242,11 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
 #endif
 
 /* New io_tree based accurate qgroup reserve API */
-int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len);
+int btrfs_qgroup_reserve_data(struct inode *inode,
+                        struct extent_changeset **reserved, u64 start, u64 len);
 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len);
-int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len);
+int btrfs_qgroup_free_data(struct inode *inode,
+                        struct extent_changeset *reserved, u64 start, u64 len);
 
 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                               bool enforce);
@@ -31,7 +31,7 @@
 #include <linux/hash.h>
 #include <linux/list_sort.h>
 #include <linux/raid/xor.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <asm/div64.h>
 #include "ctree.h"
 #include "extent_map.h"
@@ -218,12 +218,9 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
          * of a failing mount.
          */
         table_size = sizeof(*table) + sizeof(*h) * num_entries;
-        table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-        if (!table) {
-                table = vzalloc(table_size);
-                if (!table)
-                        return -ENOMEM;
-        }
+        table = kvzalloc(table_size, GFP_KERNEL);
+        if (!table)
+                return -ENOMEM;
 
         spin_lock_init(&table->cache_lock);
         INIT_LIST_HEAD(&table->stripe_cache);
@@ -1101,10 +1098,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
         }
 
         /* put a new bio on the list */
-        bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
-        if (!bio)
-                return -ENOMEM;
+        bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
 
         bio->bi_iter.bi_size = 0;
         bio->bi_bdev = stripe->dev->bdev;
         bio->bi_iter.bi_sector = disk_start >> 9;
@@ -66,7 +66,6 @@ struct reada_extctl {
 struct reada_extent {
         u64                     logical;
         struct btrfs_key        top;
-        int                     err;
         struct list_head        extctl;
         int                     refcnt;
         spinlock_t              lock;
@@ -3093,11 +3093,12 @@ int prealloc_file_extent_cluster(struct inode *inode,
         u64 prealloc_start = cluster->start - offset;
         u64 prealloc_end = cluster->end - offset;
         u64 cur_offset;
+        struct extent_changeset *data_reserved = NULL;
 
         BUG_ON(cluster->start != cluster->boundary[0]);
         inode_lock(inode);
 
-        ret = btrfs_check_data_free_space(inode, prealloc_start,
+        ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
                                           prealloc_end + 1 - prealloc_start);
         if (ret)
                 goto out;
@@ -3113,8 +3114,8 @@ int prealloc_file_extent_cluster(struct inode *inode,
                 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
                 num_bytes = end + 1 - start;
                 if (cur_offset < start)
-                        btrfs_free_reserved_data_space(inode, cur_offset,
-                                        start - cur_offset);
+                        btrfs_free_reserved_data_space(inode, data_reserved,
+                                        cur_offset, start - cur_offset);
                 ret = btrfs_prealloc_file_range(inode, 0, start,
                                                 num_bytes, num_bytes,
                                                 end + 1, &alloc_hint);
@@ -3125,10 +3126,11 @@ int prealloc_file_extent_cluster(struct inode *inode,
                 nr++;
         }
         if (cur_offset < prealloc_end)
-                btrfs_free_reserved_data_space(inode, cur_offset,
-                                prealloc_end + 1 - cur_offset);
+                btrfs_free_reserved_data_space(inode, data_reserved,
+                                cur_offset, prealloc_end + 1 - cur_offset);
 out:
         inode_unlock(inode);
+        extent_changeset_free(data_reserved);
         return ret;
 }
 
@@ -4269,8 +4271,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
         INIT_LIST_HEAD(&rc->reloc_roots);
         backref_cache_init(&rc->backref_cache);
         mapping_tree_init(&rc->reloc_root_tree);
-        extent_io_tree_init(&rc->processed_blocks,
-                            fs_info->btree_inode->i_mapping);
+        extent_io_tree_init(&rc->processed_blocks, NULL);
         return rc;
 }
 
@@ -4372,7 +4373,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
 
         btrfs_wait_block_group_reservations(rc->block_group);
         btrfs_wait_nocow_writers(rc->block_group);
-        btrfs_wait_ordered_roots(fs_info, -1,
+        btrfs_wait_ordered_roots(fs_info, U64_MAX,
                                  rc->block_group->key.objectid,
                                  rc->block_group->key.offset);
 
@@ -390,6 +390,13 @@ again:
                 WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
                 WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
                 ptr = (unsigned long)(ref + 1);
+                ret = btrfs_is_name_len_valid(leaf, path->slots[0], ptr,
+                                              name_len);
+                if (!ret) {
+                        err = -EIO;
+                        goto out;
+                }
+
                 WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
                 *sequence = btrfs_root_ref_sequence(leaf, ref);
 
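The added btrfs_is_name_len_valid() call, like the verify_dir_item() check in the props.c hunk earlier, guards against a corrupted name_len that would make the following memcmp_extent_buffer() read past the on-disk item. The invariant being enforced, in outline (a sketch under stated assumptions, not the helper's real body):

    /* The name must end inside the item payload of size item_size. */
    static bool name_fits_in_item(u32 item_size, u32 name_off, u32 name_len)
    {
            return name_len <= item_size && name_off <= item_size - name_len;
    }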
fs/btrfs/scrub.c: 211 changes
@@ -18,6 +18,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/ratelimit.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
@@ -161,14 +162,6 @@ struct scrub_parity {
         unsigned long           bitmap[0];
 };
 
-struct scrub_wr_ctx {
-        struct scrub_bio *wr_curr_bio;
-        struct btrfs_device *tgtdev;
-        int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
-        atomic_t flush_all_writes;
-        struct mutex wr_lock;
-};
-
 struct scrub_ctx {
         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
         struct btrfs_fs_info    *fs_info;
@@ -183,11 +176,14 @@ struct scrub_ctx {
         atomic_t                cancel_req;
         int                     readonly;
         int                     pages_per_rd_bio;
-        u32                     sectorsize;
-        u32                     nodesize;
 
         int                     is_dev_replace;
-        struct scrub_wr_ctx     wr_ctx;
+
+        struct scrub_bio        *wr_curr_bio;
+        struct mutex            wr_lock;
+        int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
+        atomic_t                flush_all_writes;
+        struct btrfs_device     *wr_tgtdev;
 
         /*
          * statistics
@@ -289,10 +285,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                                u64 *extent_physical,
                                struct btrfs_device **extent_dev,
                                int *extent_mirror_num);
-static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
-                              struct btrfs_device *dev,
-                              int is_dev_replace);
-static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                     struct scrub_page *spage);
 static void scrub_wr_submit(struct scrub_ctx *sctx);
@@ -643,8 +635,6 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
         if (!sctx)
                 return;
 
-        scrub_free_wr_ctx(&sctx->wr_ctx);
-
         /* this can happen when scrub is cancelled */
         if (sctx->curr != -1) {
                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
@@ -664,6 +654,7 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
                 kfree(sbio);
         }
 
+        kfree(sctx->wr_curr_bio);
         scrub_free_csums(sctx);
         kfree(sctx);
 }
@@ -680,7 +671,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
         struct scrub_ctx *sctx;
         int             i;
         struct btrfs_fs_info *fs_info = dev->fs_info;
-        int ret;
 
         sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
         if (!sctx)
@@ -710,8 +700,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
                 sctx->bios[i]->next_free = -1;
         }
         sctx->first_free = 0;
-        sctx->nodesize = fs_info->nodesize;
-        sctx->sectorsize = fs_info->sectorsize;
         atomic_set(&sctx->bios_in_flight, 0);
         atomic_set(&sctx->workers_pending, 0);
         atomic_set(&sctx->cancel_req, 0);
@@ -722,12 +710,16 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
         spin_lock_init(&sctx->stat_lock);
         init_waitqueue_head(&sctx->list_wait);
 
-        ret = scrub_setup_wr_ctx(&sctx->wr_ctx,
-                                 fs_info->dev_replace.tgtdev, is_dev_replace);
-        if (ret) {
-                scrub_free_ctx(sctx);
-                return ERR_PTR(ret);
+        WARN_ON(sctx->wr_curr_bio != NULL);
+        mutex_init(&sctx->wr_lock);
+        sctx->wr_curr_bio = NULL;
+        if (is_dev_replace) {
+                WARN_ON(!fs_info->dev_replace.tgtdev);
+                sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
+                sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
+                atomic_set(&sctx->flush_all_writes, 0);
         }
 
         return sctx;
 
 nomem:
@@ -742,6 +734,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
         u32 nlink;
         int ret;
         int i;
+        unsigned nofs_flag;
         struct extent_buffer *eb;
         struct btrfs_inode_item *inode_item;
         struct scrub_warning *swarn = warn_ctx;
@@ -780,7 +773,14 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
         nlink = btrfs_inode_nlink(eb, inode_item);
         btrfs_release_path(swarn->path);
 
+        /*
+         * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
+         * uses GFP_NOFS in this context, so we keep it consistent but it does
+         * not seem to be strictly necessary.
+         */
+        nofs_flag = memalloc_nofs_save();
         ipath = init_ipath(4096, local_root, swarn->path);
+        memalloc_nofs_restore(nofs_flag);
         if (IS_ERR(ipath)) {
                 ret = PTR_ERR(ipath);
                 ipath = NULL;
@@ -954,7 +954,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
                         ret = -EIO;
                         goto out;
                 }
-                ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE,
+                ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
                                         fixup->logical, page,
                                         offset - page_offset(page),
                                         fixup->mirror_num);
@@ -1737,12 +1737,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                 }
 
                 WARN_ON(!page->page);
-                bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
-                if (!bio) {
-                        page->io_error = 1;
-                        sblock->no_io_error_seen = 0;
-                        continue;
-                }
+                bio = btrfs_io_bio_alloc(1);
                 bio->bi_bdev = page->dev->bdev;
 
                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
@@ -1830,9 +1825,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                         return -EIO;
                 }
 
-                bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
-                if (!bio)
-                        return -EIO;
+                bio = btrfs_io_bio_alloc(1);
                 bio->bi_bdev = page_bad->dev->bdev;
                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -1898,37 +1891,31 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                     struct scrub_page *spage)
 {
-        struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
         struct scrub_bio *sbio;
         int ret;
 
-        mutex_lock(&wr_ctx->wr_lock);
+        mutex_lock(&sctx->wr_lock);
 again:
-        if (!wr_ctx->wr_curr_bio) {
-                wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
+        if (!sctx->wr_curr_bio) {
+                sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
                                               GFP_KERNEL);
-                if (!wr_ctx->wr_curr_bio) {
-                        mutex_unlock(&wr_ctx->wr_lock);
+                if (!sctx->wr_curr_bio) {
+                        mutex_unlock(&sctx->wr_lock);
                         return -ENOMEM;
                 }
-                wr_ctx->wr_curr_bio->sctx = sctx;
-                wr_ctx->wr_curr_bio->page_count = 0;
+                sctx->wr_curr_bio->sctx = sctx;
+                sctx->wr_curr_bio->page_count = 0;
         }
-        sbio = wr_ctx->wr_curr_bio;
+        sbio = sctx->wr_curr_bio;
         if (sbio->page_count == 0) {
                 struct bio *bio;
 
                 sbio->physical = spage->physical_for_dev_replace;
                 sbio->logical = spage->logical;
-                sbio->dev = wr_ctx->tgtdev;
+                sbio->dev = sctx->wr_tgtdev;
                 bio = sbio->bio;
                 if (!bio) {
-                        bio = btrfs_io_bio_alloc(GFP_KERNEL,
-                                        wr_ctx->pages_per_wr_bio);
-                        if (!bio) {
-                                mutex_unlock(&wr_ctx->wr_lock);
-                                return -ENOMEM;
-                        }
+                        bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
                         sbio->bio = bio;
                 }
 
@@ -1951,7 +1938,7 @@ again:
         if (sbio->page_count < 1) {
                 bio_put(sbio->bio);
                 sbio->bio = NULL;
-                mutex_unlock(&wr_ctx->wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
}
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
|
@ -1961,23 +1948,22 @@ again:
|
||||||
sbio->pagev[sbio->page_count] = spage;
|
sbio->pagev[sbio->page_count] = spage;
|
||||||
scrub_page_get(spage);
|
scrub_page_get(spage);
|
||||||
sbio->page_count++;
|
sbio->page_count++;
|
||||||
if (sbio->page_count == wr_ctx->pages_per_wr_bio)
|
if (sbio->page_count == sctx->pages_per_wr_bio)
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&wr_ctx->wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void scrub_wr_submit(struct scrub_ctx *sctx)
|
static void scrub_wr_submit(struct scrub_ctx *sctx)
|
||||||
{
|
{
|
||||||
struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
|
|
||||||
struct scrub_bio *sbio;
|
struct scrub_bio *sbio;
|
||||||
|
|
||||||
if (!wr_ctx->wr_curr_bio)
|
if (!sctx->wr_curr_bio)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
sbio = wr_ctx->wr_curr_bio;
|
sbio = sctx->wr_curr_bio;
|
||||||
wr_ctx->wr_curr_bio = NULL;
|
sctx->wr_curr_bio = NULL;
|
||||||
WARN_ON(!sbio->bio->bi_bdev);
|
WARN_ON(!sbio->bio->bi_bdev);
|
||||||
scrub_pending_bio_inc(sctx);
|
scrub_pending_bio_inc(sctx);
|
||||||
/* process all writes in a single worker thread. Then the block layer
|
/* process all writes in a single worker thread. Then the block layer
|
||||||
|
@ -2081,7 +2067,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
|
||||||
page = sblock->pagev[0]->page;
|
page = sblock->pagev[0]->page;
|
||||||
buffer = kmap_atomic(page);
|
buffer = kmap_atomic(page);
|
||||||
|
|
||||||
len = sctx->sectorsize;
|
len = sctx->fs_info->sectorsize;
|
||||||
index = 0;
|
index = 0;
|
||||||
for (;;) {
|
for (;;) {
|
||||||
u64 l = min_t(u64, len, PAGE_SIZE);
|
u64 l = min_t(u64, len, PAGE_SIZE);
|
||||||
|
@ -2146,7 +2132,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
|
||||||
BTRFS_UUID_SIZE))
|
BTRFS_UUID_SIZE))
|
||||||
sblock->header_error = 1;
|
sblock->header_error = 1;
|
||||||
|
|
||||||
len = sctx->nodesize - BTRFS_CSUM_SIZE;
|
len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
|
||||||
mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
|
mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
|
||||||
p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
|
p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
|
||||||
index = 0;
|
index = 0;
|
||||||
|
@ -2329,10 +2315,7 @@ again:
|
||||||
sbio->dev = spage->dev;
|
sbio->dev = spage->dev;
|
||||||
bio = sbio->bio;
|
bio = sbio->bio;
|
||||||
if (!bio) {
|
if (!bio) {
|
||||||
bio = btrfs_io_bio_alloc(GFP_KERNEL,
|
bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
|
||||||
sctx->pages_per_rd_bio);
|
|
||||||
if (!bio)
|
|
||||||
return -ENOMEM;
|
|
||||||
sbio->bio = bio;
|
sbio->bio = bio;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2420,10 +2403,10 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
|
||||||
scrub_block_put(sblock);
|
scrub_block_put(sblock);
|
||||||
|
|
||||||
if (sctx->is_dev_replace &&
|
if (sctx->is_dev_replace &&
|
||||||
atomic_read(&sctx->wr_ctx.flush_all_writes)) {
|
atomic_read(&sctx->flush_all_writes)) {
|
||||||
mutex_lock(&sctx->wr_ctx.wr_lock);
|
mutex_lock(&sctx->wr_lock);
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&sctx->wr_ctx.wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
scrub_pending_bio_dec(sctx);
|
scrub_pending_bio_dec(sctx);
|
||||||
|
@ -2458,10 +2441,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
|
||||||
goto bbio_out;
|
goto bbio_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
|
bio = btrfs_io_bio_alloc(0);
|
||||||
if (!bio)
|
|
||||||
goto bbio_out;
|
|
||||||
|
|
||||||
bio->bi_iter.bi_sector = logical >> 9;
|
bio->bi_iter.bi_sector = logical >> 9;
|
||||||
bio->bi_private = sblock;
|
bio->bi_private = sblock;
|
||||||
bio->bi_end_io = scrub_missing_raid56_end_io;
|
bio->bi_end_io = scrub_missing_raid56_end_io;
|
||||||
|
@ -2628,10 +2608,10 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
|
||||||
spin_unlock(&sctx->list_lock);
|
spin_unlock(&sctx->list_lock);
|
||||||
|
|
||||||
if (sctx->is_dev_replace &&
|
if (sctx->is_dev_replace &&
|
||||||
atomic_read(&sctx->wr_ctx.flush_all_writes)) {
|
atomic_read(&sctx->flush_all_writes)) {
|
||||||
mutex_lock(&sctx->wr_ctx.wr_lock);
|
mutex_lock(&sctx->wr_lock);
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&sctx->wr_ctx.wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
scrub_pending_bio_dec(sctx);
|
scrub_pending_bio_dec(sctx);
|
||||||
|
@ -2726,8 +2706,8 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
|
||||||
if (!sum)
|
if (!sum)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
|
index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize;
|
||||||
num_sectors = sum->len / sctx->sectorsize;
|
num_sectors = sum->len / sctx->fs_info->sectorsize;
|
||||||
memcpy(csum, sum->sums + index, sctx->csum_size);
|
memcpy(csum, sum->sums + index, sctx->csum_size);
|
||||||
if (index == num_sectors - 1) {
|
if (index == num_sectors - 1) {
|
||||||
list_del(&sum->list);
|
list_del(&sum->list);
|
||||||
|
@ -2746,19 +2726,19 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
|
||||||
u32 blocksize;
|
u32 blocksize;
|
||||||
|
|
||||||
if (flags & BTRFS_EXTENT_FLAG_DATA) {
|
if (flags & BTRFS_EXTENT_FLAG_DATA) {
|
||||||
blocksize = sctx->sectorsize;
|
blocksize = sctx->fs_info->sectorsize;
|
||||||
spin_lock(&sctx->stat_lock);
|
spin_lock(&sctx->stat_lock);
|
||||||
sctx->stat.data_extents_scrubbed++;
|
sctx->stat.data_extents_scrubbed++;
|
||||||
sctx->stat.data_bytes_scrubbed += len;
|
sctx->stat.data_bytes_scrubbed += len;
|
||||||
spin_unlock(&sctx->stat_lock);
|
spin_unlock(&sctx->stat_lock);
|
||||||
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
|
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
|
||||||
blocksize = sctx->nodesize;
|
blocksize = sctx->fs_info->nodesize;
|
||||||
spin_lock(&sctx->stat_lock);
|
spin_lock(&sctx->stat_lock);
|
||||||
sctx->stat.tree_extents_scrubbed++;
|
sctx->stat.tree_extents_scrubbed++;
|
||||||
sctx->stat.tree_bytes_scrubbed += len;
|
sctx->stat.tree_bytes_scrubbed += len;
|
||||||
spin_unlock(&sctx->stat_lock);
|
spin_unlock(&sctx->stat_lock);
|
||||||
} else {
|
} else {
|
||||||
blocksize = sctx->sectorsize;
|
blocksize = sctx->fs_info->sectorsize;
|
||||||
WARN_ON(1);
|
WARN_ON(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2892,11 +2872,11 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (flags & BTRFS_EXTENT_FLAG_DATA) {
|
if (flags & BTRFS_EXTENT_FLAG_DATA) {
|
||||||
blocksize = sctx->sectorsize;
|
blocksize = sctx->fs_info->sectorsize;
|
||||||
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
|
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
|
||||||
blocksize = sctx->nodesize;
|
blocksize = sctx->fs_info->nodesize;
|
||||||
} else {
|
} else {
|
||||||
blocksize = sctx->sectorsize;
|
blocksize = sctx->fs_info->sectorsize;
|
||||||
WARN_ON(1);
|
WARN_ON(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3037,10 +3017,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
|
||||||
if (ret || !bbio || !bbio->raid_map)
|
if (ret || !bbio || !bbio->raid_map)
|
||||||
goto bbio_out;
|
goto bbio_out;
|
||||||
|
|
||||||
bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
|
bio = btrfs_io_bio_alloc(0);
|
||||||
if (!bio)
|
|
||||||
goto bbio_out;
|
|
||||||
|
|
||||||
bio->bi_iter.bi_sector = sparity->logic_start >> 9;
|
bio->bi_iter.bi_sector = sparity->logic_start >> 9;
|
||||||
bio->bi_private = sparity;
|
bio->bi_private = sparity;
|
||||||
bio->bi_end_io = scrub_parity_bio_endio;
|
bio->bi_end_io = scrub_parity_bio_endio;
|
||||||
|
@ -3305,9 +3282,9 @@ out:
|
||||||
logic_end - logic_start);
|
logic_end - logic_start);
|
||||||
scrub_parity_put(sparity);
|
scrub_parity_put(sparity);
|
||||||
scrub_submit(sctx);
|
scrub_submit(sctx);
|
||||||
mutex_lock(&sctx->wr_ctx.wr_lock);
|
mutex_lock(&sctx->wr_lock);
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&sctx->wr_ctx.wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
|
|
||||||
btrfs_release_path(path);
|
btrfs_release_path(path);
|
||||||
return ret < 0 ? ret : 0;
|
return ret < 0 ? ret : 0;
|
||||||
|
@ -3463,14 +3440,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
|
||||||
*/
|
*/
|
||||||
if (atomic_read(&fs_info->scrub_pause_req)) {
|
if (atomic_read(&fs_info->scrub_pause_req)) {
|
||||||
/* push queued extents */
|
/* push queued extents */
|
||||||
atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
|
atomic_set(&sctx->flush_all_writes, 1);
|
||||||
scrub_submit(sctx);
|
scrub_submit(sctx);
|
||||||
mutex_lock(&sctx->wr_ctx.wr_lock);
|
mutex_lock(&sctx->wr_lock);
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&sctx->wr_ctx.wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
wait_event(sctx->list_wait,
|
wait_event(sctx->list_wait,
|
||||||
atomic_read(&sctx->bios_in_flight) == 0);
|
atomic_read(&sctx->bios_in_flight) == 0);
|
||||||
atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
|
atomic_set(&sctx->flush_all_writes, 0);
|
||||||
scrub_blocked_if_needed(fs_info);
|
scrub_blocked_if_needed(fs_info);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3677,9 +3654,9 @@ skip:
|
||||||
out:
|
out:
|
||||||
/* push queued extents */
|
/* push queued extents */
|
||||||
scrub_submit(sctx);
|
scrub_submit(sctx);
|
||||||
mutex_lock(&sctx->wr_ctx.wr_lock);
|
mutex_lock(&sctx->wr_lock);
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&sctx->wr_ctx.wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
|
|
||||||
blk_finish_plug(&plug);
|
blk_finish_plug(&plug);
|
||||||
btrfs_free_path(path);
|
btrfs_free_path(path);
|
||||||
|
@ -3859,7 +3836,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
||||||
*/
|
*/
|
||||||
btrfs_wait_block_group_reservations(cache);
|
btrfs_wait_block_group_reservations(cache);
|
||||||
btrfs_wait_nocow_writers(cache);
|
btrfs_wait_nocow_writers(cache);
|
||||||
ret = btrfs_wait_ordered_roots(fs_info, -1,
|
ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
|
||||||
cache->key.objectid,
|
cache->key.objectid,
|
||||||
cache->key.offset);
|
cache->key.offset);
|
||||||
if (ret > 0) {
|
if (ret > 0) {
|
||||||
|
@ -3916,11 +3893,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
||||||
* write requests are really completed when bios_in_flight
|
* write requests are really completed when bios_in_flight
|
||||||
* changes to 0.
|
* changes to 0.
|
||||||
*/
|
*/
|
||||||
atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
|
atomic_set(&sctx->flush_all_writes, 1);
|
||||||
scrub_submit(sctx);
|
scrub_submit(sctx);
|
||||||
mutex_lock(&sctx->wr_ctx.wr_lock);
|
mutex_lock(&sctx->wr_lock);
|
||||||
scrub_wr_submit(sctx);
|
scrub_wr_submit(sctx);
|
||||||
mutex_unlock(&sctx->wr_ctx.wr_lock);
|
mutex_unlock(&sctx->wr_lock);
|
||||||
|
|
||||||
wait_event(sctx->list_wait,
|
wait_event(sctx->list_wait,
|
||||||
atomic_read(&sctx->bios_in_flight) == 0);
|
atomic_read(&sctx->bios_in_flight) == 0);
|
||||||
|
@ -3934,7 +3911,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
||||||
*/
|
*/
|
||||||
wait_event(sctx->list_wait,
|
wait_event(sctx->list_wait,
|
||||||
atomic_read(&sctx->workers_pending) == 0);
|
atomic_read(&sctx->workers_pending) == 0);
|
||||||
atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
|
atomic_set(&sctx->flush_all_writes, 0);
|
||||||
|
|
||||||
scrub_pause_off(fs_info);
|
scrub_pause_off(fs_info);
|
||||||
|
|
||||||
|
@ -4337,32 +4314,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
|
||||||
btrfs_put_bbio(bbio);
|
btrfs_put_bbio(bbio);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
|
|
||||||
struct btrfs_device *dev,
|
|
||||||
int is_dev_replace)
|
|
||||||
{
|
|
||||||
WARN_ON(wr_ctx->wr_curr_bio != NULL);
|
|
||||||
|
|
||||||
mutex_init(&wr_ctx->wr_lock);
|
|
||||||
wr_ctx->wr_curr_bio = NULL;
|
|
||||||
if (!is_dev_replace)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
WARN_ON(!dev->bdev);
|
|
||||||
wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
|
|
||||||
wr_ctx->tgtdev = dev;
|
|
||||||
atomic_set(&wr_ctx->flush_all_writes, 0);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
|
|
||||||
{
|
|
||||||
mutex_lock(&wr_ctx->wr_lock);
|
|
||||||
kfree(wr_ctx->wr_curr_bio);
|
|
||||||
wr_ctx->wr_curr_bio = NULL;
|
|
||||||
mutex_unlock(&wr_ctx->wr_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
|
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
|
||||||
int mirror_num, u64 physical_for_dev_replace)
|
int mirror_num, u64 physical_for_dev_replace)
|
||||||
{
|
{
|
||||||
|
@ -4665,7 +4616,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
|
||||||
struct btrfs_device *dev;
|
struct btrfs_device *dev;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
dev = sctx->wr_ctx.tgtdev;
|
dev = sctx->wr_tgtdev;
|
||||||
if (!dev)
|
if (!dev)
|
||||||
return -EIO;
|
return -EIO;
|
||||||
if (!dev->bdev) {
|
if (!dev->bdev) {
|
||||||
|
@ -4673,13 +4624,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
|
||||||
"scrub write_page_nocow(bdev == NULL) is unexpected");
|
"scrub write_page_nocow(bdev == NULL) is unexpected");
|
||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
}
|
||||||
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
|
bio = btrfs_io_bio_alloc(1);
|
||||||
if (!bio) {
|
|
||||||
spin_lock(&sctx->stat_lock);
|
|
||||||
sctx->stat.malloc_errors++;
|
|
||||||
spin_unlock(&sctx->stat_lock);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
bio->bi_iter.bi_size = 0;
|
bio->bi_iter.bi_size = 0;
|
||||||
bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
|
bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
|
||||||
bio->bi_bdev = dev->bdev;
|
bio->bi_bdev = dev->bdev;
|
||||||
|
|
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1069,6 +1069,12 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
 			}
 		}
 
+		ret = btrfs_is_name_len_valid(eb, path->slots[0],
+			  (unsigned long)(di + 1), name_len + data_len);
+		if (!ret) {
+			ret = -EIO;
+			goto out;
+		}
 		if (name_len + data_len > buf_len) {
 			buf_len = name_len + data_len;
 			if (is_vmalloc_addr(buf)) {
@@ -1083,7 +1089,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
 				buf = tmp;
 			}
 			if (!buf) {
-				buf = vmalloc(buf_len);
+				buf = kvmalloc(buf_len, GFP_KERNEL);
 				if (!buf) {
 					ret = -ENOMEM;
 					goto out;
@@ -2769,15 +2775,20 @@ out:
 
 struct recorded_ref {
 	struct list_head list;
-	char *dir_path;
 	char *name;
 	struct fs_path *full_path;
 	u64 dir;
 	u64 dir_gen;
-	int dir_path_len;
 	int name_len;
 };
 
+static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
+{
+	ref->full_path = path;
+	ref->name = (char *)kbasename(ref->full_path->start);
+	ref->name_len = ref->full_path->end - ref->name;
+}
+
 /*
  * We need to process new refs before deleted refs, but compare_tree gives us
  * everything mixed. So we first record all refs and later process them.
@@ -2794,17 +2805,7 @@ static int __record_ref(struct list_head *head, u64 dir,
 
 	ref->dir = dir;
 	ref->dir_gen = dir_gen;
-	ref->full_path = path;
-
-	ref->name = (char *)kbasename(ref->full_path->start);
-	ref->name_len = ref->full_path->end - ref->name;
-	ref->dir_path = ref->full_path->start;
-	if (ref->name == ref->full_path->start)
-		ref->dir_path_len = 0;
-	else
-		ref->dir_path_len = ref->full_path->end -
-				ref->full_path->start - 1 - ref->name_len;
-
+	set_ref_path(ref, path);
 	list_add_tail(&ref->list, head);
 	return 0;
 }
@@ -3546,9 +3547,17 @@ static int is_ancestor(struct btrfs_root *root,
 		       struct fs_path *fs_path)
 {
 	u64 ino = ino2;
+	bool free_path = false;
+	int ret = 0;
+
+	if (!fs_path) {
+		fs_path = fs_path_alloc();
+		if (!fs_path)
+			return -ENOMEM;
+		free_path = true;
+	}
 
 	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
-		int ret;
 		u64 parent;
 		u64 parent_gen;
 
@@ -3557,13 +3566,18 @@ static int is_ancestor(struct btrfs_root *root,
 		if (ret < 0) {
 			if (ret == -ENOENT && ino == ino2)
 				ret = 0;
-			return ret;
+			goto out;
+		}
+		if (parent == ino1) {
+			ret = parent_gen == ino1_gen ? 1 : 0;
+			goto out;
 		}
-		if (parent == ino1)
-			return parent_gen == ino1_gen ? 1 : 0;
 		ino = parent;
 	}
-	return 0;
+out:
+	if (free_path)
+		fs_path_free(fs_path);
+	return ret;
 }
 
 static int wait_for_parent_move(struct send_ctx *sctx,
@@ -3686,6 +3700,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
 	int is_orphan = 0;
 	u64 last_dir_ino_rm = 0;
 	bool can_rename = true;
+	bool orphanized_ancestor = false;
 
 	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
 
@@ -3837,9 +3852,16 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
 				 * might contain the pre-orphanization name of
 				 * ow_inode, which is no longer valid.
 				 */
-				fs_path_reset(valid_path);
-				ret = get_cur_path(sctx, sctx->cur_ino,
-					   sctx->cur_inode_gen, valid_path);
+				ret = is_ancestor(sctx->parent_root,
+						  ow_inode, ow_gen,
+						  sctx->cur_ino, NULL);
+				if (ret > 0) {
+					orphanized_ancestor = true;
+					fs_path_reset(valid_path);
+					ret = get_cur_path(sctx, sctx->cur_ino,
+							   sctx->cur_inode_gen,
+							   valid_path);
+				}
 				if (ret < 0)
 					goto out;
 			} else {
@@ -3960,6 +3982,43 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
 			if (ret < 0)
 				goto out;
 			if (!ret) {
+				/*
+				 * If we orphanized any ancestor before, we need
+				 * to recompute the full path for deleted names,
+				 * since any such path was computed before we
+				 * processed any references and orphanized any
+				 * ancestor inode.
+				 */
+				if (orphanized_ancestor) {
+					struct fs_path *new_path;
+
+					/*
+					 * Our reference's name member points to
+					 * its full_path member string, so we
+					 * use here a new path.
+					 */
+					new_path = fs_path_alloc();
+					if (!new_path) {
+						ret = -ENOMEM;
+						goto out;
+					}
+					ret = get_cur_path(sctx, cur->dir,
+							   cur->dir_gen,
+							   new_path);
+					if (ret < 0) {
+						fs_path_free(new_path);
+						goto out;
+					}
+					ret = fs_path_add(new_path,
+							  cur->name,
+							  cur->name_len);
+					if (ret < 0) {
+						fs_path_free(new_path);
+						goto out;
+					}
+					fs_path_free(cur->full_path);
+					set_ref_path(cur, new_path);
+				}
 				ret = send_unlink(sctx, cur->full_path);
 				if (ret < 0)
 					goto out;
@@ -6397,13 +6456,10 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 
 	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
 
-	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
+	sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL);
 	if (!sctx->clone_roots) {
-		sctx->clone_roots = vzalloc(alloc_size);
-		if (!sctx->clone_roots) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -601,18 +601,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 			}
 			break;
 		case Opt_alloc_start:
-			num = match_strdup(&args[0]);
-			if (num) {
-				mutex_lock(&info->chunk_mutex);
-				info->alloc_start = memparse(num, NULL);
-				mutex_unlock(&info->chunk_mutex);
-				kfree(num);
-				btrfs_info(info, "allocations start at %llu",
-					   info->alloc_start);
-			} else {
-				ret = -ENOMEM;
-				goto out;
-			}
+			btrfs_info(info,
+				"option alloc_start is obsolete, ignored");
 			break;
 		case Opt_acl:
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
@@ -1187,7 +1177,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 		return 0;
 	}
 
-	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 
 	trans = btrfs_attach_transaction_barrier(root);
 	if (IS_ERR(trans)) {
@@ -1232,8 +1222,6 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 		seq_puts(seq, ",nobarrier");
 	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
 		seq_printf(seq, ",max_inline=%llu", info->max_inline);
-	if (info->alloc_start != 0)
-		seq_printf(seq, ",alloc_start=%llu", info->alloc_start);
 	if (info->thread_pool_size != min_t(unsigned long,
 					    num_online_cpus() + 2, 8))
 		seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
@@ -1716,7 +1704,6 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 	unsigned long old_opts = fs_info->mount_opt;
 	unsigned long old_compress_type = fs_info->compress_type;
 	u64 old_max_inline = fs_info->max_inline;
-	u64 old_alloc_start = fs_info->alloc_start;
 	int old_thread_pool_size = fs_info->thread_pool_size;
 	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
 	int ret;
@@ -1855,9 +1842,6 @@ restore:
 	fs_info->mount_opt = old_opts;
 	fs_info->compress_type = old_compress_type;
 	fs_info->max_inline = old_max_inline;
-	mutex_lock(&fs_info->chunk_mutex);
-	fs_info->alloc_start = old_alloc_start;
-	mutex_unlock(&fs_info->chunk_mutex);
 	btrfs_resize_thread_pool(fs_info,
 		old_thread_pool_size, fs_info->thread_pool_size);
 	fs_info->metadata_ratio = old_metadata_ratio;
@@ -1898,18 +1882,15 @@ static inline void btrfs_descending_sort_devices(
 static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
 				       u64 *free_bytes)
 {
-	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_device_info *devices_info;
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *device;
 	u64 skip_space;
 	u64 type;
 	u64 avail_space;
-	u64 used_space;
 	u64 min_stripe_size;
 	int min_stripes = 1, num_stripes = 1;
 	int i = 0, nr_devices;
-	int ret;
 
 	/*
 	 * We aren't under the device list lock, so this is racy-ish, but good
@@ -1927,12 +1908,12 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
 	}
 
 	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
-			       GFP_NOFS);
+			       GFP_KERNEL);
 	if (!devices_info)
 		return -ENOMEM;
 
 	/* calc min stripe number for data space allocation */
-	type = btrfs_get_alloc_profile(root, 1);
+	type = btrfs_data_alloc_profile(fs_info);
 	if (type & BTRFS_BLOCK_GROUP_RAID0) {
 		min_stripes = 2;
 		num_stripes = nr_devices;
@@ -1949,8 +1930,6 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
 	else
 		min_stripe_size = BTRFS_STRIPE_LEN;
 
-	if (fs_info->alloc_start)
-		mutex_lock(&fs_devices->device_list_mutex);
 	rcu_read_lock();
 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
 		if (!device->in_fs_metadata || !device->bdev ||
@@ -1973,34 +1952,6 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
 		 */
 		skip_space = SZ_1M;
 
-		/* user can set the offset in fs_info->alloc_start. */
-		if (fs_info->alloc_start &&
-		    fs_info->alloc_start + BTRFS_STRIPE_LEN <=
-		    device->total_bytes) {
-			rcu_read_unlock();
-			skip_space = max(fs_info->alloc_start, skip_space);
-
-			/*
-			 * btrfs can not use the free space in
-			 * [0, skip_space - 1], we must subtract it from the
-			 * total. In order to implement it, we account the used
-			 * space in this range first.
-			 */
-			ret = btrfs_account_dev_extents_size(device, 0,
-							     skip_space - 1,
-							     &used_space);
-			if (ret) {
-				kfree(devices_info);
-				mutex_unlock(&fs_devices->device_list_mutex);
-				return ret;
-			}
-
-			rcu_read_lock();
-
-			/* calc the free space in [0, skip_space - 1] */
-			skip_space -= used_space;
-		}
-
 		/*
 		 * we can use the free space in [0, skip_space - 1], subtract
 		 * it from the total.
@@ -2019,8 +1970,6 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
 		i++;
 	}
 	rcu_read_unlock();
-	if (fs_info->alloc_start)
-		mutex_unlock(&fs_devices->device_list_mutex);
 
 	nr_devices = i;
 
@@ -2057,10 +2006,9 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
  * multiplier to scale the sizes.
  *
  * Unused device space usage is based on simulating the chunk allocator
- * algorithm that respects the device sizes, order of allocations and the
- * 'alloc_start' value, this is a close approximation of the actual use but
- * there are other factors that may change the result (like a new metadata
- * chunk).
+ * algorithm that respects the device sizes and order of allocations. This is
+ * a close approximation of the actual use but there are other factors that may
+ * change the result (like a new metadata chunk).
 *
 * If metadata is exhausted, f_bavail will be 0.
 */
@@ -2243,7 +2191,7 @@ static int btrfs_freeze(struct super_block *sb)
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
 	struct btrfs_root *root = fs_info->tree_root;
 
-	fs_info->fs_frozen = 1;
+	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
 	/*
 	 * We don't need a barrier here, we'll wait for any transaction that
 	 * could be in progress on other threads (and do delayed iputs that
@@ -2262,7 +2210,9 @@ static int btrfs_freeze(struct super_block *sb)
 
 static int btrfs_unfreeze(struct super_block *sb)
 {
-	btrfs_sb(sb)->fs_frozen = 0;
+	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
 	return 0;
 }
 
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -447,11 +447,52 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 
 BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
 
+static ssize_t quota_override_show(struct kobject *kobj,
+				   struct kobj_attribute *a, char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	int quota_override;
+
+	quota_override = test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags);
+	return snprintf(buf, PAGE_SIZE, "%d\n", quota_override);
+}
+
+static ssize_t quota_override_store(struct kobject *kobj,
+				    struct kobj_attribute *a,
+				    const char *buf, size_t len)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	unsigned long knob;
+	int err;
+
+	if (!fs_info)
+		return -EPERM;
+
+	if (!capable(CAP_SYS_RESOURCE))
+		return -EPERM;
+
+	err = kstrtoul(buf, 10, &knob);
+	if (err)
+		return err;
+	if (knob > 1)
+		return -EINVAL;
+
+	if (knob)
+		set_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags);
+	else
+		clear_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags);
+
+	return len;
+}
+
+BTRFS_ATTR_RW(quota_override, quota_override_show, quota_override_store);
+
 static const struct attribute *btrfs_attrs[] = {
 	BTRFS_ATTR_PTR(label),
 	BTRFS_ATTR_PTR(nodesize),
 	BTRFS_ATTR_PTR(sectorsize),
 	BTRFS_ATTR_PTR(clone_alignment),
+	BTRFS_ATTR_PTR(quota_override),
 	NULL,
 };
 
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -87,7 +87,7 @@ static int test_find_delalloc(u32 sectorsize)
 		return -ENOMEM;
 	}
 
-	extent_io_tree_init(&tmp, &inode->i_data);
+	extent_io_tree_init(&tmp, inode);
 
 	/*
 	 * First go through and create and mark all of our pages dirty, we pin
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -93,7 +93,7 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
 			btrfs_put_block_group_trimming(cache);
 			btrfs_put_block_group(cache);
 		}
-		kmem_cache_free(btrfs_transaction_cachep, transaction);
+		kfree(transaction);
 	}
 }
 
@@ -228,7 +228,7 @@ loop:
 	 */
 	BUG_ON(type == TRANS_JOIN_NOLOCK);
 
-	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
+	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
 	if (!cur_trans)
 		return -ENOMEM;
 
@@ -238,11 +238,11 @@ loop:
 		 * someone started a transaction after we unlocked. Make sure
 		 * to redo the checks above
 		 */
-		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+		kfree(cur_trans);
 		goto loop;
 	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 		spin_unlock(&fs_info->trans_lock);
-		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+		kfree(cur_trans);
 		return -EROFS;
 	}
 
@@ -294,7 +294,7 @@ loop:
 	spin_lock_init(&cur_trans->dropped_roots_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
-			     fs_info->btree_inode->i_mapping);
+			     fs_info->btree_inode);
 	fs_info->generation++;
 	cur_trans->transid = fs_info->generation;
 	fs_info->running_transaction = cur_trans;
@@ -1374,9 +1374,6 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
 	ret = commit_fs_roots(trans, fs_info);
 	if (ret)
 		goto out;
-	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
-	if (ret < 0)
-		goto out;
 	ret = btrfs_qgroup_account_extents(trans, fs_info);
 	if (ret < 0)
 		goto out;
@@ -1926,7 +1923,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
-		btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 }
 
 static inline void
@@ -2180,13 +2177,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 		goto scrub_continue;
 	}
 
-	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
-	if (ret) {
-		mutex_unlock(&fs_info->tree_log_mutex);
-		mutex_unlock(&fs_info->reloc_mutex);
-		goto scrub_continue;
-	}
-
 	/*
 	 * Since fs roots are all committed, we can get a quite accurate
 	 * new_roots. So let's do quota accounting.
@@ -2314,7 +2304,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 	 * it'll result in deadlock about SB_FREEZE_FS.
 	 */
 	if (current != fs_info->transaction_kthread &&
-	    current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
+	    current != fs_info->cleaner_kthread &&
+	    !test_bit(BTRFS_FS_FROZEN, &fs_info->flags))
 		btrfs_run_delayed_iputs(fs_info);
 
 	return ret;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1175,15 +1175,19 @@ next:
 	return 0;
 }
 
-static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
-			     u32 *namelen, char **name, u64 *index,
-			     u64 *parent_objectid)
+static int extref_get_fields(struct extent_buffer *eb, int slot,
+			     unsigned long ref_ptr, u32 *namelen, char **name,
+			     u64 *index, u64 *parent_objectid)
 {
 	struct btrfs_inode_extref *extref;
 
 	extref = (struct btrfs_inode_extref *)ref_ptr;
 
 	*namelen = btrfs_inode_extref_name_len(eb, extref);
+	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
+				     *namelen))
+		return -EIO;
+
 	*name = kmalloc(*namelen, GFP_NOFS);
 	if (*name == NULL)
 		return -ENOMEM;
@@ -1198,14 +1202,19 @@ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
 	return 0;
 }
 
-static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
-			  u32 *namelen, char **name, u64 *index)
+static int ref_get_fields(struct extent_buffer *eb, int slot,
+			  unsigned long ref_ptr, u32 *namelen, char **name,
+			  u64 *index)
 {
 	struct btrfs_inode_ref *ref;
 
 	ref = (struct btrfs_inode_ref *)ref_ptr;
 
 	*namelen = btrfs_inode_ref_name_len(eb, ref);
+	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
+				     *namelen))
+		return -EIO;
+
 	*name = kmalloc(*namelen, GFP_NOFS);
 	if (*name == NULL)
 		return -ENOMEM;
@@ -1280,8 +1289,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
 
 	while (ref_ptr < ref_end) {
 		if (log_ref_ver) {
-			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
-						&ref_index, &parent_objectid);
+			ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
+					&name, &ref_index, &parent_objectid);
 			/*
 			 * parent object can change from one array
 			 * item to another.
@@ -1293,8 +1302,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
 					goto out;
 			}
 		} else {
-			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
-					     &ref_index);
+			ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
+					     &name, &ref_index);
 		}
 		if (ret)
 			goto out;
@@ -1841,7 +1850,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
 	ptr_end = ptr + item_size;
 	while (ptr < ptr_end) {
 		di = (struct btrfs_dir_item *)ptr;
-		if (verify_dir_item(fs_info, eb, di))
+		if (verify_dir_item(fs_info, eb, slot, di))
 			return -EIO;
 		name_len = btrfs_dir_name_len(eb, di);
 		ret = replay_one_name(trans, root, path, eb, di, key);
@@ -2017,7 +2026,7 @@ again:
 	ptr_end = ptr + item_size;
 	while (ptr < ptr_end) {
 		di = (struct btrfs_dir_item *)ptr;
-		if (verify_dir_item(fs_info, eb, di)) {
+		if (verify_dir_item(fs_info, eb, slot, di)) {
 			ret = -EIO;
 			goto out;
 		}
@@ -2102,6 +2111,7 @@ static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
 			      struct btrfs_path *path,
 			      const u64 ino)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key search_key;
 	struct btrfs_path *log_path;
 	int i;
@@ -2143,6 +2153,12 @@ process_leaf:
 			u32 this_len = sizeof(*di) + name_len + data_len;
 			char *name;
 
+			ret = verify_dir_item(fs_info, path->nodes[0],
+					      path->slots[0], di);
+			if (ret) {
+				ret = -EIO;
+				goto out;
+			}
 			name = kmalloc(name_len, GFP_NOFS);
 			if (!name) {
 				ret = -ENOMEM;
@@ -4546,6 +4562,12 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
 			this_len = sizeof(*extref) + this_name_len;
 		}
 
+		ret = btrfs_is_name_len_valid(eb, slot, name_ptr,
+					      this_name_len);
+		if (!ret) {
+			ret = -EIO;
+			goto out;
+		}
 		if (this_name_len > name_len) {
 			char *new_name;
 
@ -242,6 +242,17 @@ static struct btrfs_device *__alloc_device(void)
|
||||||
if (!dev)
|
if (!dev)
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Preallocate a bio that's always going to be used for flushing device
|
||||||
|
* barriers and matches the device lifespan
|
||||||
|
*/
|
||||||
|
dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
|
||||||
|
if (!dev->flush_bio) {
|
||||||
|
kfree(dev);
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
|
}
|
||||||
|
bio_get(dev->flush_bio);
|
||||||
|
|
||||||
INIT_LIST_HEAD(&dev->dev_list);
|
INIT_LIST_HEAD(&dev->dev_list);
|
||||||
INIT_LIST_HEAD(&dev->dev_alloc_list);
|
INIT_LIST_HEAD(&dev->dev_alloc_list);
|
||||||
INIT_LIST_HEAD(&dev->resized_list);
|
INIT_LIST_HEAD(&dev->resized_list);
|
||||||
|
@ -838,6 +849,7 @@ static void __free_device(struct work_struct *work)
|
||||||
|
|
||||||
device = container_of(work, struct btrfs_device, rcu_work);
|
device = container_of(work, struct btrfs_device, rcu_work);
|
||||||
rcu_string_free(device->name);
|
rcu_string_free(device->name);
|
||||||
|
bio_put(device->flush_bio);
|
||||||
kfree(device);
|
kfree(device);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1353,15 +1365,13 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
|
||||||
int ret;
|
int ret;
|
||||||
int slot;
|
int slot;
|
||||||
struct extent_buffer *l;
|
struct extent_buffer *l;
|
||||||
u64 min_search_start;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We don't want to overwrite the superblock on the drive nor any area
|
* We don't want to overwrite the superblock on the drive nor any area
|
||||||
* used by the boot loader (grub for example), so we make sure to start
|
* used by the boot loader (grub for example), so we make sure to start
|
||||||
* at an offset of at least 1MB.
|
* at an offset of at least 1MB.
|
||||||
*/
|
*/
|
||||||
min_search_start = max(fs_info->alloc_start, 1024ull * 1024);
|
search_start = max_t(u64, search_start, SZ_1M);
|
||||||
search_start = max(search_start, min_search_start);
|
|
||||||
|
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
|
@ -2387,7 +2397,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
|
||||||
device->io_width = fs_info->sectorsize;
|
device->io_width = fs_info->sectorsize;
|
||||||
device->io_align = fs_info->sectorsize;
|
device->io_align = fs_info->sectorsize;
|
||||||
device->sector_size = fs_info->sectorsize;
|
device->sector_size = fs_info->sectorsize;
|
||||||
device->total_bytes = i_size_read(bdev->bd_inode);
|
device->total_bytes = round_down(i_size_read(bdev->bd_inode),
|
||||||
|
fs_info->sectorsize);
|
||||||
device->disk_total_bytes = device->total_bytes;
|
device->disk_total_bytes = device->total_bytes;
|
||||||
device->commit_total_bytes = device->total_bytes;
|
device->commit_total_bytes = device->total_bytes;
|
||||||
device->fs_info = fs_info;
|
device->fs_info = fs_info;
|
||||||
|
@ -2417,16 +2428,14 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
|
||||||
fs_info->fs_devices->total_devices++;
|
fs_info->fs_devices->total_devices++;
|
||||||
fs_info->fs_devices->total_rw_bytes += device->total_bytes;
|
fs_info->fs_devices->total_rw_bytes += device->total_bytes;
|
||||||
|
|
||||||
spin_lock(&fs_info->free_chunk_lock);
|
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
|
||||||
fs_info->free_chunk_space += device->total_bytes;
|
|
||||||
spin_unlock(&fs_info->free_chunk_lock);
|
|
||||||
|
|
||||||
if (!blk_queue_nonrot(q))
|
if (!blk_queue_nonrot(q))
|
||||||
fs_info->fs_devices->rotating = 1;
|
fs_info->fs_devices->rotating = 1;
|
||||||
|
|
||||||
tmp = btrfs_super_total_bytes(fs_info->super_copy);
|
tmp = btrfs_super_total_bytes(fs_info->super_copy);
|
||||||
btrfs_set_super_total_bytes(fs_info->super_copy,
|
btrfs_set_super_total_bytes(fs_info->super_copy,
|
||||||
tmp + device->total_bytes);
|
round_down(tmp + device->total_bytes, fs_info->sectorsize));
|
||||||
|
|
||||||
tmp = btrfs_super_num_devices(fs_info->super_copy);
|
tmp = btrfs_super_num_devices(fs_info->super_copy);
|
||||||
btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
|
btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
|
||||||
|
@ -2574,7 +2583,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
|
|
||||||
name = rcu_string_strdup(device_path, GFP_NOFS);
|
name = rcu_string_strdup(device_path, GFP_KERNEL);
|
||||||
if (!name) {
|
if (!name) {
|
||||||
kfree(device);
|
kfree(device);
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
|
@ -2689,6 +2698,8 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
|
||||||
if (!device->writeable)
|
if (!device->writeable)
|
||||||
return -EACCES;
|
return -EACCES;
|
||||||
|
|
||||||
|
new_size = round_down(new_size, fs_info->sectorsize);
|
||||||
|
|
||||||
mutex_lock(&fs_info->chunk_mutex);
|
mutex_lock(&fs_info->chunk_mutex);
|
||||||
old_total = btrfs_super_total_bytes(super_copy);
|
old_total = btrfs_super_total_bytes(super_copy);
|
||||||
diff = new_size - device->total_bytes;
|
diff = new_size - device->total_bytes;
|
||||||
|
@ -2701,7 +2712,8 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
fs_devices = fs_info->fs_devices;
|
fs_devices = fs_info->fs_devices;
|
||||||
|
|
||||||
btrfs_set_super_total_bytes(super_copy, old_total + diff);
|
btrfs_set_super_total_bytes(super_copy,
|
||||||
|
round_down(old_total + diff, fs_info->sectorsize));
|
||||||
device->fs_devices->total_rw_bytes += diff;
|
device->fs_devices->total_rw_bytes += diff;
|
||||||
|
|
||||||
btrfs_device_set_total_bytes(device, new_size);
|
btrfs_device_set_total_bytes(device, new_size);
|
||||||
|
@ -2874,9 +2886,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
|
||||||
mutex_lock(&fs_info->chunk_mutex);
|
mutex_lock(&fs_info->chunk_mutex);
|
||||||
btrfs_device_set_bytes_used(device,
|
btrfs_device_set_bytes_used(device,
|
||||||
device->bytes_used - dev_extent_len);
|
device->bytes_used - dev_extent_len);
|
||||||
spin_lock(&fs_info->free_chunk_lock);
|
atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
|
||||||
fs_info->free_chunk_space += dev_extent_len;
|
|
||||||
spin_unlock(&fs_info->free_chunk_lock);
|
|
||||||
btrfs_clear_space_info_full(fs_info);
|
btrfs_clear_space_info_full(fs_info);
|
||||||
mutex_unlock(&fs_info->chunk_mutex);
|
mutex_unlock(&fs_info->chunk_mutex);
|
||||||
}
|
}
|
||||||
|
@ -4393,7 +4403,10 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
|
||||||
struct btrfs_super_block *super_copy = fs_info->super_copy;
|
struct btrfs_super_block *super_copy = fs_info->super_copy;
|
||||||
u64 old_total = btrfs_super_total_bytes(super_copy);
|
u64 old_total = btrfs_super_total_bytes(super_copy);
|
||||||
u64 old_size = btrfs_device_get_total_bytes(device);
|
u64 old_size = btrfs_device_get_total_bytes(device);
|
||||||
u64 diff = old_size - new_size;
|
u64 diff;
|
||||||
|
|
||||||
|
new_size = round_down(new_size, fs_info->sectorsize);
|
||||||
|
diff = old_size - new_size;
|
||||||
|
|
||||||
if (device->is_tgtdev_for_dev_replace)
|
if (device->is_tgtdev_for_dev_replace)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -4409,9 +4422,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
|
||||||
btrfs_device_set_total_bytes(device, new_size);
|
btrfs_device_set_total_bytes(device, new_size);
|
||||||
if (device->writeable) {
|
if (device->writeable) {
|
||||||
device->fs_devices->total_rw_bytes -= diff;
|
device->fs_devices->total_rw_bytes -= diff;
|
||||||
spin_lock(&fs_info->free_chunk_lock);
|
atomic64_sub(diff, &fs_info->free_chunk_space);
|
||||||
fs_info->free_chunk_space -= diff;
|
|
||||||
spin_unlock(&fs_info->free_chunk_lock);
|
|
||||||
}
|
}
|
||||||
mutex_unlock(&fs_info->chunk_mutex);
|
mutex_unlock(&fs_info->chunk_mutex);
|
||||||
|
|
||||||
|
@@ -4522,7 +4533,8 @@ again:
 			  &fs_info->fs_devices->resized_devices);
 
 	WARN_ON(diff > old_total);
-	btrfs_set_super_total_bytes(super_copy, old_total - diff);
+	btrfs_set_super_total_bytes(super_copy,
+			round_down(old_total - diff, fs_info->sectorsize));
 	mutex_unlock(&fs_info->chunk_mutex);
 
 	/* Now btrfs_update_device() will change the on-disk size. */
@@ -4535,9 +4547,7 @@ done:
 		btrfs_device_set_total_bytes(device, old_size);
 		if (device->writeable)
 			device->fs_devices->total_rw_bytes += diff;
-		spin_lock(&fs_info->free_chunk_lock);
-		fs_info->free_chunk_space += diff;
-		spin_unlock(&fs_info->free_chunk_lock);
+		atomic64_add(diff, &fs_info->free_chunk_space);
 		mutex_unlock(&fs_info->chunk_mutex);
 	}
 	return ret;
@@ -4882,9 +4892,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
 	}
 
-	spin_lock(&info->free_chunk_lock);
-	info->free_chunk_space -= (stripe_size * map->num_stripes);
-	spin_unlock(&info->free_chunk_lock);
+	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
 
 	free_extent_map(em);
 	check_raid56_incompat_flag(info, type);
@@ -5029,20 +5037,19 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
 					 struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_root *extent_root = fs_info->extent_root;
 	u64 chunk_offset;
 	u64 sys_chunk_offset;
 	u64 alloc_profile;
 	int ret;
 
 	chunk_offset = find_next_chunk(fs_info);
-	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
+	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
 	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
 	if (ret)
 		return ret;
 
 	sys_chunk_offset = find_next_chunk(fs_info);
-	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
+	alloc_profile = btrfs_system_alloc_profile(fs_info);
 	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
 	return ret;
 }
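Note: the purpose-named helpers take fs_info directly, so init_first_rw_device() no longer needs to pick a root just to derive an allocation profile. A hedged sketch of what such wrappers reduce to (assumed shape; the real helpers live in extent-tree.c and get_alloc_profile() is internal there):

	/* Sketch: resolve the current profile for one block group class. */
	u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
	{
		return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	}

	u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
	{
		return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	}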
@@ -6267,10 +6274,9 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			continue;
 		}
 
-		if (dev_nr < total_devs - 1) {
-			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
-			BUG_ON(!bio); /* -ENOMEM */
-		} else
+		if (dev_nr < total_devs - 1)
+			bio = btrfs_bio_clone(first_bio);
+		else
 			bio = first_bio;
 
 		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
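Note: btrfs_bio_clone() loses its gfp_t argument and the BUG_ON because clones now come from a dedicated bioset, and a mempool-backed bioset allocation waits instead of failing. A sketch of the shape of such a helper (simplified; the real one also sets up the btrfs_io_bio wrapper fields):

	#include <linux/bio.h>

	static struct bio_set *btrfs_bioset;	/* created once at module init */

	static struct bio *clone_stripe_bio(struct bio *first_bio)
	{
		/* bio_clone_fast() on a mempool-backed bioset cannot return
		 * NULL for a waiting allocation, hence no error path. */
		return bio_clone_fast(first_bio, GFP_NOFS, btrfs_bioset);
	}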
@ -6685,10 +6691,8 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
|
||||||
device->in_fs_metadata = 1;
|
device->in_fs_metadata = 1;
|
||||||
if (device->writeable && !device->is_tgtdev_for_dev_replace) {
|
if (device->writeable && !device->is_tgtdev_for_dev_replace) {
|
||||||
device->fs_devices->total_rw_bytes += device->total_bytes;
|
device->fs_devices->total_rw_bytes += device->total_bytes;
|
||||||
spin_lock(&fs_info->free_chunk_lock);
|
atomic64_add(device->total_bytes - device->bytes_used,
|
||||||
fs_info->free_chunk_space += device->total_bytes -
|
&fs_info->free_chunk_space);
|
||||||
device->bytes_used;
|
|
||||||
spin_unlock(&fs_info->free_chunk_lock);
|
|
||||||
}
|
}
|
||||||
ret = 0;
|
ret = 0;
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
@@ -74,6 +74,8 @@ struct btrfs_device {
 	int missing;
 	int can_discard;
 	int is_tgtdev_for_dev_replace;
+	int last_flush_error;
+	int flush_bio_sent;
 
 #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
 	seqcount_t data_seqcount;
@ -279,6 +281,11 @@ struct btrfs_io_bio {
|
||||||
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
|
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
|
||||||
u8 *csum_allocated;
|
u8 *csum_allocated;
|
||||||
btrfs_io_bio_end_io_t *end_io;
|
btrfs_io_bio_end_io_t *end_io;
|
||||||
|
struct bvec_iter iter;
|
||||||
|
/*
|
||||||
|
* This member must come last, bio_alloc_bioset will allocate enough
|
||||||
|
* bytes for entire btrfs_io_bio but relies on bio being last.
|
||||||
|
*/
|
||||||
struct bio bio;
|
struct bio bio;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
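Note: bio must stay the last member because bio_alloc_bioset() front-pads the allocation for the containing structure, and the wrapper is then recovered from the embedded bio with container_of(). The accessor amounts to:

	static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
	{
		/* valid only for bios allocated from btrfs_bioset, where the
		 * front pad covers the whole btrfs_io_bio */
		return container_of(bio, struct btrfs_io_bio, bio);
	}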
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
@@ -336,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 		u32 this_len = sizeof(*di) + name_len + data_len;
 		unsigned long name_ptr = (unsigned long)(di + 1);
 
-		if (verify_dir_item(fs_info, leaf, di)) {
+		if (verify_dir_item(fs_info, leaf, slot, di)) {
 			ret = -EIO;
 			goto err;
 		}
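Note: passing the leaf slot lets verify_dir_item() bound the dir item against the actual on-disk item size rather than only static limits. A simplified sketch of the kind of check this enables (hypothetical helper; the real logic is in fs/btrfs/dir-item.c and also iterates multiple entries per item):

	static bool dir_entry_fits(struct extent_buffer *leaf, int slot,
				   struct btrfs_dir_item *di)
	{
		u32 item_size = btrfs_item_size_nr(leaf, slot);
		u32 used = sizeof(*di) + btrfs_dir_name_len(leaf, di) +
			   btrfs_dir_data_len(leaf, di);

		/* a corrupted name_len/data_len would point past the item */
		return used <= item_size;
	}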
@ -24,12 +24,13 @@
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/zlib.h>
|
#include <linux/zlib.h>
|
||||||
#include <linux/zutil.h>
|
#include <linux/zutil.h>
|
||||||
#include <linux/vmalloc.h>
|
#include <linux/mm.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
#include <linux/pagemap.h>
|
#include <linux/pagemap.h>
|
||||||
#include <linux/bio.h>
|
#include <linux/bio.h>
|
||||||
|
#include <linux/refcount.h>
|
||||||
#include "compression.h"
|
#include "compression.h"
|
||||||
|
|
||||||
struct workspace {
|
struct workspace {
|
||||||
|
@ -42,7 +43,7 @@ static void zlib_free_workspace(struct list_head *ws)
|
||||||
{
|
{
|
||||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||||
|
|
||||||
vfree(workspace->strm.workspace);
|
kvfree(workspace->strm.workspace);
|
||||||
kfree(workspace->buf);
|
kfree(workspace->buf);
|
||||||
kfree(workspace);
|
kfree(workspace);
|
||||||
}
|
}
|
||||||
|
@ -52,14 +53,14 @@ static struct list_head *zlib_alloc_workspace(void)
|
||||||
struct workspace *workspace;
|
struct workspace *workspace;
|
||||||
int workspacesize;
|
int workspacesize;
|
||||||
|
|
||||||
workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
|
workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
|
||||||
if (!workspace)
|
if (!workspace)
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
|
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
|
||||||
zlib_inflate_workspacesize());
|
zlib_inflate_workspacesize());
|
||||||
workspace->strm.workspace = vmalloc(workspacesize);
|
workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
|
||||||
workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
|
workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
|
||||||
if (!workspace->strm.workspace || !workspace->buf)
|
if (!workspace->strm.workspace || !workspace->buf)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
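Note: kvmalloc() tries a physically contiguous kmalloc first and transparently falls back to vmalloc for large or fragmented requests; the kvfree() in zlib_free_workspace() handles either case, so the caller keeps no record of which path was taken. The idiom, in isolation:

	#include <linux/mm.h>

	void *buf = kvmalloc(size, GFP_KERNEL);	/* kmalloc, then vmalloc fallback */
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kvfree(buf);				/* correct for both allocation paths */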
@ -211,10 +212,7 @@ out:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int zlib_decompress_bio(struct list_head *ws, struct page **pages_in,
|
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||||
u64 disk_start,
|
|
||||||
struct bio *orig_bio,
|
|
||||||
size_t srclen)
|
|
||||||
{
|
{
|
||||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||||
int ret = 0, ret2;
|
int ret = 0, ret2;
|
||||||
|
@@ -222,8 +220,12 @@ static int zlib_decompress_bio(struct list_head *ws, struct page **pages_in,
 	char *data_in;
 	size_t total_out = 0;
 	unsigned long page_in_index = 0;
+	size_t srclen = cb->compressed_len;
 	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 	unsigned long buf_start;
+	struct page **pages_in = cb->compressed_pages;
+	u64 disk_start = cb->start;
+	struct bio *orig_bio = cb->orig_bio;
 
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->strm.next_in = data_in;
@ -1410,42 +1410,6 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
|
||||||
TP_ARGS(wq)
|
TP_ARGS(wq)
|
||||||
);
|
);
|
||||||
|
|
||||||
DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
|
|
||||||
|
|
||||||
TP_PROTO(struct inode *inode, u64 free_reserved),
|
|
||||||
|
|
||||||
TP_ARGS(inode, free_reserved),
|
|
||||||
|
|
||||||
TP_STRUCT__entry_btrfs(
|
|
||||||
__field( u64, rootid )
|
|
||||||
__field( unsigned long, ino )
|
|
||||||
__field( u64, free_reserved )
|
|
||||||
),
|
|
||||||
|
|
||||||
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
|
|
||||||
__entry->rootid = BTRFS_I(inode)->root->objectid;
|
|
||||||
__entry->ino = inode->i_ino;
|
|
||||||
__entry->free_reserved = free_reserved;
|
|
||||||
),
|
|
||||||
|
|
||||||
TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
|
|
||||||
__entry->rootid, __entry->ino, __entry->free_reserved)
|
|
||||||
);
|
|
||||||
|
|
||||||
DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_init_data_rsv_map,
|
|
||||||
|
|
||||||
TP_PROTO(struct inode *inode, u64 free_reserved),
|
|
||||||
|
|
||||||
TP_ARGS(inode, free_reserved)
|
|
||||||
);
|
|
||||||
|
|
||||||
DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_free_data_rsv_map,
|
|
||||||
|
|
||||||
TP_PROTO(struct inode *inode, u64 free_reserved),
|
|
||||||
|
|
||||||
TP_ARGS(inode, free_reserved)
|
|
||||||
);
|
|
||||||
|
|
||||||
#define BTRFS_QGROUP_OPERATIONS \
|
#define BTRFS_QGROUP_OPERATIONS \
|
||||||
{ QGROUP_RESERVE, "reserve" }, \
|
{ QGROUP_RESERVE, "reserve" }, \
|
||||||
{ QGROUP_RELEASE, "release" }, \
|
{ QGROUP_RELEASE, "release" }, \
|
||||||
|
|
|
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
@@ -426,31 +426,54 @@ struct btrfs_ioctl_ino_lookup_args {
 	char name[BTRFS_INO_LOOKUP_PATH_MAX];
 };
 
+/* Search criteria for the btrfs SEARCH ioctl family. */
 struct btrfs_ioctl_search_key {
-	/* which root are we searching.  0 is the tree of tree roots */
-	__u64 tree_id;
-
-	/* keys returned will be >= min and <= max */
-	__u64 min_objectid;
-	__u64 max_objectid;
-
-	/* keys returned will be >= min and <= max */
-	__u64 min_offset;
-	__u64 max_offset;
-
-	/* max and min transids to search for */
-	__u64 min_transid;
-	__u64 max_transid;
-
-	/* keys returned will be >= min and <= max */
-	__u32 min_type;
-	__u32 max_type;
+	/*
+	 * The tree we're searching in. 1 is the tree of tree roots, 2 is the
+	 * extent tree, etc...
+	 *
+	 * A special tree_id value of 0 will cause a search in the subvolume
+	 * tree that the inode which is passed to the ioctl is part of.
+	 */
+	__u64 tree_id;		/* in */
 
 	/*
-	 * how many items did userland ask for, and how many are we
-	 * returning
+	 * When doing a tree search, we're actually taking a slice from a
+	 * linear search space of 136-bit keys.
+	 *
+	 * A full 136-bit tree key is composed as:
+	 *   (objectid << 72) + (type << 64) + offset
+	 *
+	 * The individual min and max values for objectid, type and offset
+	 * define the min_key and max_key values for the search range. All
+	 * metadata items with a key in the interval [min_key, max_key] will be
+	 * returned.
+	 *
+	 * Additionally, we can filter the items returned on transaction id of
+	 * the metadata block they're stored in by specifying a transid range.
+	 * Be aware that this transaction id only denotes when the metadata
+	 * page that currently contains the item got written the last time as
+	 * result of a COW operation. The number does not have any meaning
+	 * related to the transaction in which an individual item that is being
+	 * returned was created or changed.
 	 */
-	__u32 nr_items;
+	__u64 min_objectid;	/* in */
+	__u64 max_objectid;	/* in */
+	__u64 min_offset;	/* in */
+	__u64 max_offset;	/* in */
+	__u64 min_transid;	/* in */
+	__u64 max_transid;	/* in */
+	__u32 min_type;		/* in */
+	__u32 max_type;		/* in */
+
+	/*
+	 * input: The maximum amount of results desired.
+	 * output: The actual amount of items returned, restricted by any of:
+	 *  - reaching the upper bound of the search range
+	 *  - reaching the input nr_items amount of items
+	 *  - completely filling the supplied memory buffer
+	 */
+	__u32 nr_items;		/* in/out */
 
 	/* align to 64 bits */
 	__u32 unused;
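Note: with the semantics documented above, a minimal user-space sketch of driving BTRFS_IOC_TREE_SEARCH (assumptions: fd is open on any file in the filesystem and the caller has CAP_SYS_ADMIN, which this ioctl requires; error handling trimmed):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/btrfs.h>

	static int dump_root_tree(int fd)
	{
		struct btrfs_ioctl_search_args args;

		memset(&args, 0, sizeof(args));
		args.key.tree_id = 1;			/* tree of tree roots */
		args.key.max_objectid = (__u64)-1;	/* span the whole key space */
		args.key.max_offset = (__u64)-1;
		args.key.max_transid = (__u64)-1;
		args.key.max_type = (__u32)-1;
		args.key.nr_items = 4096;		/* in: cap on results */

		if (ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args) < 0)
			return -1;
		/* out: how many items actually landed in args.buf */
		printf("got %u items\n", args.key.nr_items);
		return 0;
	}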