// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

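	/*
	 * Pick a single profile from the remaining candidates, in this fixed
	 * order of preference: RAID6, RAID5, RAID10, RAID1, RAID0.
	 */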
	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;
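
	/*
	 * Sample the avail_*_alloc_bits under the profiles seqlock and retry
	 * if a writer raced with us.
	 */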
	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

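	/* The tree is indexed by the block groups' start offsets. */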
	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

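		/*
		 * When not asked for a containing group (contains == 0), keep
		 * track of the closest group starting after bytenr.
		 */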
		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have had allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}
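
/*
 * The v1 space cache load is considered done once the block group leaves the
 * BTRFS_CACHE_FAST state, either because loading the cache finished or because
 * we fell back to the slow (extent tree) caching path.
 */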
static bool space_cache_v1_done(struct btrfs_block_group *cache)
{
	bool ret;

	spin_lock(&cache->lock);
	ret = cache->cached != BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	return ret;
}

void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, space_cache_v1_done(cache));
}

#ifdef CONFIG_BTRFS_DEBUG
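/*
 * Debug helper: delete every other chunk of free space from the block group,
 * leaving its free space artificially fragmented for testing.
 */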
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group, since we could have freed
 * extents we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

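	/*
	 * Walk the ranges marked in excluded_extents (such as the superblock
	 * mirrors) and only add the gaps between them as free space.
	 */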
	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

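	/* Never scan below the offset of the primary superblock (64K). */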
	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

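		/*
		 * For a METADATA_ITEM the key offset is the tree level, so the
		 * extent spans exactly one node (nodesize); for an EXTENT_ITEM
		 * the offset is the extent's length in bytes.
		 */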
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
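	/*
	 * fragment_free_space() below deletes every other chunk of free
	 * space, so account half of the block group's free space as used up
	 * front to keep the space_info counters consistent.
	 */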
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
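	/* One ref for cache->caching_ctl, one for us (dropped at "out"). */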
	refcount_set(&caching_ctl->count, 2);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		cache->cached = BTRFS_CACHE_FAST;
	else
		cache->cached = BTRFS_CACHE_STARTED;
	cache->has_caching_ctl = 1;
	spin_unlock(&cache->lock);

	spin_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	spin_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (load_cache_only && caching_ctl)
		btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = fs_info->extent_root;
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

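	/*
	 * On zoned filesystems this block group may be the currently active
	 * tree-log block group; clear that association before removing it.
	 */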
	btrfs_clear_treelog_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		spin_lock(&fs_info->block_group_cache_lock);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		spin_unlock(&fs_info->block_group_cache_lock);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
		block_group->zone_unusable;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items);
}

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

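	/*
	 * Block groups backing an active swap file must stay writable: scrub
	 * temporarily sets block groups read-only, and a write into a
	 * read-only block group would COW the swap file's extents and change
	 * their physical location on disk.
	 */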
if (cache->swap_extents) {
|
|
|
|
ret = -ETXTBSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-06-21 03:37:59 +08:00
|
|
|
if (cache->ro) {
|
|
|
|
cache->ro++;
|
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-10-24 00:48:22 +08:00
|
|
|
num_bytes = cache->length - cache->reserved - cache->pinned -
|
2021-02-04 18:21:52 +08:00
|
|
|
cache->bytes_super - cache->zone_unusable - cache->used;
|
2019-06-21 03:37:59 +08:00
|
|
|
|
|
|
|
/*
|
2020-01-17 22:07:39 +08:00
|
|
|
* Data never overcommits, even in mixed mode, so do just the straight
|
|
|
|
* check of left over space in how much we have allocated.
|
2019-06-21 03:37:59 +08:00
|
|
|
*/
|
2020-01-17 22:07:39 +08:00
|
|
|
if (force) {
|
|
|
|
ret = 0;
|
|
|
|
} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
|
|
|
|
u64 sinfo_used = btrfs_space_info_used(sinfo, true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Here we make sure if we mark this bg RO, we still have enough
|
|
|
|
* free space as buffer.
|
|
|
|
*/
|
|
|
|
if (sinfo_used + num_bytes <= sinfo->total_bytes)
|
|
|
|
ret = 0;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We overcommit metadata, so we need to do the
|
|
|
|
* btrfs_can_overcommit check here, and we need to pass in
|
|
|
|
* BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
|
|
|
|
* leeway to allow us to mark this block group as read only.
|
|
|
|
*/
|
|
|
|
if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
|
|
|
|
BTRFS_RESERVE_NO_FLUSH))
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!ret) {
|
2019-06-21 03:37:59 +08:00
|
|
|
sinfo->bytes_readonly += num_bytes;
|
2021-02-04 18:21:52 +08:00
|
|
|
if (btrfs_is_zoned(cache->fs_info)) {
|
|
|
|
/* Migrate zone_unusable bytes to readonly */
|
|
|
|
sinfo->bytes_readonly += cache->zone_unusable;
|
|
|
|
sinfo->bytes_zone_unusable -= cache->zone_unusable;
|
|
|
|
cache->zone_unusable = 0;
|
|
|
|
}
|
2019-06-21 03:37:59 +08:00
|
|
|
cache->ro++;
|
|
|
|
list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
spin_unlock(&cache->lock);
|
|
|
|
spin_unlock(&sinfo->lock);
|
|
|
|
if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
|
|
|
|
btrfs_info(cache->fs_info,
|
2019-10-24 00:48:22 +08:00
|
|
|
"unable to make block group %llu ro", cache->start);
|
2019-06-21 03:37:59 +08:00
|
|
|
btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
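
/*
 * Illustrative caller sketch (an assumption, not part of the original file):
 * a scrub-style user flips the group RO around its work and must treat
 * -ETXTBSY (extents used by an active swap file) as "skip, don't fail":
 *
 *	ret = inc_block_group_ro(cache, 0);
 *	if (ret == -ETXTBSY) {
 *		btrfs_warn(cache->fs_info,
 *			   "skipping block group %llu, used by swap files",
 *			   cache->start);
 *		return 0;
 *	}
 *	if (!ret) {
 *		scrub_the_group(cache);	// hypothetical worker
 *		btrfs_dec_block_group_ro(cache);
 *	}
 */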

static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can look up
	 * the block group after we unpinned it from pinned_extents and removed
	 * it, leading to a BUG_ON() at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans)
		btrfs_put_transaction(prev_trans);

	return ret == 0;
}
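
/*
 * Timeline sketch (an illustration, not from the original source) of the
 * race the mutex above closes:
 *
 *	task A (transaction N)		task B (finishing N - 1)
 *	clear bg range in N-1 pins	unpin_extent_range() walks pins,
 *	clear bg range in N pins	looks up the owning block group
 *	remove the block group
 *
 * Without unused_bg_unpin_mutex, B can find a pinned range whose block
 * group A already removed, hitting the BUG_ON() mentioned above.
 */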

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	/*
	 * Long running balances can keep us blocked here for eternity, so
	 * simply skip deletion if we're unable to get the mutex.
	 */
	if (!mutex_trylock(&fs_info->delete_unused_bgs_mutex))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		/*
		 * Async discard moves the final block group discard to be prior
		 * to the unused_bgs code path. Therefore, if it's not fully
		 * trimmed, punt it back to the async discard lists.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
		    !btrfs_is_free_space_trimmed(block_group)) {
			trace_btrfs_skip_unused_block_group(block_group);
			up_write(&space_info->groups_sem);
			/* Requeue if we failed because of async discard */
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto next;
		}

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->pinned ||
		    block_group->used || block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->start);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		if (!clean_pinned_extents(trans, block_group)) {
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}

		/*
		 * At this point, the block_group is read only and should fail
		 * new allocations.  However, btrfs_finish_extent_commit() can
		 * cause this block_group to be placed back on the discard
		 * lists because now the block_group isn't fully discarded.
		 * Bail here and try again later after discarding everything.
		 */
		spin_lock(&fs_info->discard_ctl.lock);
		if (!list_empty(&block_group->discard_list)) {
			spin_unlock(&fs_info->discard_ctl.lock);
			btrfs_dec_block_group_ro(block_group);
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto end_trans;
		}
		spin_unlock(&fs_info->discard_ctl.lock);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		__btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/*
		 * The normal path here is an unused block group is passed here,
		 * then trimming is handled in the transaction commit path.
		 * Async discard interposes before this to do the trimming
		 * before coming down the unused block group path as trimming
		 * will no longer be done later in the transaction commit path.
		 */
		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
			goto flip_async;

		/*
		 * DISCARD can flip during remount. On zoned filesystems, we
		 * need to reset sequential-required zones.
		 */
		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
				btrfs_is_zoned(fs_info);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_freeze_block_group(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, block_group->start);

		if (ret) {
			if (trimming)
				btrfs_unfreeze_block_group(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
	return;

flip_async:
	btrfs_end_transaction(trans);
	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
	btrfs_put_block_group(block_group);
	btrfs_discard_punt_unused_bgs_list(fs_info);
}
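
/*
 * Context note (an assumption based on mainline callers): this is driven
 * from the cleaner kthread, outside any transaction, which is why each
 * block group deletion opens its own transaction via
 * btrfs_start_trans_remove_block_group().
 */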

void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
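
/*
 * Illustrative use (an assumption mirroring the extent free path): when
 * the last used byte of a group goes away, the updater queues it here:
 *
 *	block_group->used -= num_bytes;	// hypothetical accounting step
 *	if (block_group->used == 0)
 *		btrfs_mark_bg_unused(block_group);
 *
 * The list_empty() check above makes repeated calls for the same group
 * harmless, since the group is only ever linked once.
 */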

static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			   struct btrfs_path *path)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_block_group_item bg;
	struct extent_buffer *leaf;
	int slot;
	u64 flags;
	int ret = 0;

	slot = path->slots[0];
	leaf = path->nodes[0];

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
	read_unlock(&em_tree->lock);
	if (!em) {
		btrfs_err(fs_info,
			  "logical %llu len %llu found bg but no related chunk",
			  key->objectid, key->offset);
		return -ENOENT;
	}

	if (em->start != key->objectid || em->len != key->offset) {
		btrfs_err(fs_info,
			"block group %llu len %llu mismatch with chunk %llu len %llu",
			  key->objectid, key->offset, em->start, em->len);
		ret = -EUCLEAN;
		goto out_free_em;
	}

	read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bg));
	flags = btrfs_stack_block_group_flags(&bg) &
		BTRFS_BLOCK_GROUP_TYPE_MASK;

	if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(fs_info,
"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
			  key->objectid, key->offset, flags,
			  (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
		ret = -EUCLEAN;
	}

out_free_em:
	free_extent_map(em);
	return ret;
}

static int find_first_block_group(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = read_bg_from_eb(fs_info, &found_key, path);
			break;
		}

		path->slots[0]++;
	}
out:
	return ret;
}
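
/*
 * Return convention sketch (a reading aid, not original text): 0 means the
 * path now points at a validated BLOCK_GROUP_ITEM at or after *key, > 0
 * means the tree is exhausted, < 0 is an error. The caller in
 * btrfs_read_block_groups() resumes past the group just read with
 * key.objectid += key.offset.
 */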

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
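
/*
 * Matching read side, as a sketch (an assumption based on the seqlock
 * usage above): lockless readers retry until the writer is done:
 *
 *	unsigned int seq;
 *	u64 avail;
 *
 *	do {
 *		seq = read_seqbegin(&fs_info->profiles_lock);
 *		avail = fs_info->avail_data_alloc_bits;
 *	} while (read_seqretry(&fs_info->profiles_lock, seq));
 */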

/**
 * btrfs_rmap_block - map a physical disk address to a list of logical addresses
 * @fs_info:       the filesystem
 * @chunk_start:   logical address of block group
 * @bdev:          physical device to resolve, can be NULL to indicate any device
 * @physical:      physical address to map to logical addresses
 * @logical:       return array of logical addresses which map to @physical
 * @naddrs:        length of @logical
 * @stripe_len:    size of IO stripe for the given block group
 *
 * Maps a particular @physical disk address to a list of @logical addresses.
 * Used primarily to exclude those portions of a block group that contain super
 * block copies.
 */
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 data_stripe_length;
	u64 io_stripe_size;
	int i, nr = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	data_stripe_length = em->orig_block_len;
	io_stripe_size = map->stripe_len;
	chunk_start = em->start;

	/* For RAID5/6 adjust to a full IO stripe length */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		io_stripe_size = map->stripe_len * nr_data_stripes(map);

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool already_inserted = false;
		u64 stripe_nr;
		u64 offset;
		int j;

		if (!in_range(physical, map->stripes[i].physical,
			      data_stripe_length))
			continue;

		if (bdev && map->stripes[i].dev->bdev != bdev)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * The remaining case would be for RAID56, multiply by
		 * nr_data_stripes().  Alternatively, just use rmap_len below
		 * instead of map->stripe_len
		 */

		bytenr = chunk_start + stripe_nr * io_stripe_size + offset;

		/* Ensure we don't add duplicate addresses */
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr) {
				already_inserted = true;
				break;
			}
		}

		if (!already_inserted)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = io_stripe_size;
out:
	free_extent_map(em);
	return ret;
}
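
/*
 * Worked example with hypothetical numbers: RAID0 over two stripes with a
 * 64K stripe_len, where @physical lands 16K into stripe i = 1. Then
 * stripe_nr = 0 with offset = 16K after the division, the RAID0 branch
 * gives stripe_nr = 0 * 2 + 1 = 1, and the reported logical address is
 * bytenr = chunk_start + 1 * 64K + 16K = chunk_start + 80K.
 */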

static int exclude_super_stripes(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	const bool zoned = btrfs_is_zoned(fs_info);
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
		cache->bytes_super += stripe_len;
		ret = btrfs_add_excluded_extent(fs_info, cache->start,
						stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->start, NULL,
				       bytenr, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		/* Shouldn't have super stripes in sequential zones */
		if (zoned && nr) {
			btrfs_err(fs_info,
			"zoned: block group %llu must not contain super block",
				  cache->start);
			return -EUCLEAN;
		}

		while (nr--) {
			u64 len = min_t(u64, stripe_len,
				cache->start + cache->length - logical[nr]);

			cache->bytes_super += len;
			ret = btrfs_add_excluded_extent(fs_info, logical[nr],
							len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static void link_block_group(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int index = btrfs_bg_flags_to_raid_index(cache->flags);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}

static struct btrfs_block_group *btrfs_create_block_group_cache(
		struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->start = start;

	cache->fs_info = fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;

	refcount_set(&cache->refs, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->discard_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
	atomic_set(&cache->frozen, 0);
	mutex_init(&cache->free_space_lock);
	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);

	return cache;
}
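
/*
 * Lifetime sketch (a reading aid): the cache leaves here with exactly one
 * reference (refcount_set(..., 1) above), so an error path that has not
 * yet published the group can unwind with a single btrfs_put_block_group(),
 * as read_one_block_group() does further down.
 */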

/*
 * Iterate all chunks and verify that each of them has the corresponding block
 * group
 */
static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct btrfs_block_group *bg;
	u64 start = 0;
	int ret = 0;

	while (1) {
		read_lock(&map_tree->lock);
		/*
		 * lookup_extent_mapping will return the first extent map
		 * intersecting the range, so setting @len to 1 is enough to
		 * get the first chunk.
		 */
		em = lookup_extent_mapping(map_tree, start, 1);
		read_unlock(&map_tree->lock);
		if (!em)
			break;

		bg = btrfs_lookup_block_group(fs_info, em->start);
		if (!bg) {
			btrfs_err(fs_info,
	"chunk start=%llu len=%llu doesn't have corresponding block group",
				  em->start, em->len);
			ret = -EUCLEAN;
			free_extent_map(em);
			break;
		}
		if (bg->start != em->start || bg->length != em->len ||
		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
			btrfs_err(fs_info,
"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
				em->start, em->len,
				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
				bg->start, bg->length,
				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
			ret = -EUCLEAN;
			free_extent_map(em);
			btrfs_put_block_group(bg);
			break;
		}
		start = em->start + em->len;
		free_extent_map(em);
		btrfs_put_block_group(bg);
	}
	return ret;
}

static int read_one_block_group(struct btrfs_fs_info *info,
				struct btrfs_block_group_item *bgi,
				const struct btrfs_key *key,
				int need_clear)
{
	struct btrfs_block_group *cache;
	struct btrfs_space_info *space_info;
	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
	int ret;

	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);

	cache = btrfs_create_block_group_cache(info, key->objectid);
	if (!cache)
		return -ENOMEM;

	cache->length = key->offset;
	cache->used = btrfs_stack_block_group_used(bgi);
	cache->flags = btrfs_stack_block_group_flags(bgi);

	set_free_space_tree_thresholds(cache);

	if (need_clear) {
		/*
		 * When we mount with old space cache, we need to
		 * set BTRFS_DC_CLEAR and set dirty flag.
		 *
		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
		 *    truncate the old free space cache inode and
		 *    setup a new one.
		 * b) Setting 'dirty flag' makes sure that we flush
		 *    the new space cache info onto disk.
		 */
		if (btrfs_test_opt(info, SPACE_CACHE))
			cache->disk_cache_state = BTRFS_DC_CLEAR;
	}
	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->start);
			ret = -EINVAL;
			goto error;
	}

	ret = btrfs_load_block_group_zone_info(cache, false);
	if (ret) {
		btrfs_err(info, "zoned: failed to load zone info of bg %llu",
			  cache->start);
		goto error;
	}

	/*
	 * We need to exclude the super stripes now so that the space info has
	 * super bytes accounted for, otherwise we'll think we have more space
	 * than we actually do.
	 */
	ret = exclude_super_stripes(cache);
	if (ret) {
		/* We may have excluded something, so call this just in case. */
		btrfs_free_excluded_extents(cache);
		goto error;
	}

	/*
	 * For zoned filesystem, space after the allocation offset is the only
	 * free space for a block group. So, we don't need any caching work.
	 * btrfs_calc_zone_unusable() will set the amount of free space and
	 * zone_unusable space.
	 *
	 * For regular filesystem, check for two cases, either we are full, and
	 * therefore don't need to bother with the caching work since we won't
	 * find any space, or we are empty, and we can just add all the space
	 * in and be done with it.  This saves us _a_lot_ of time, particularly
	 * in the full case.
	 */
	if (btrfs_is_zoned(info)) {
		btrfs_calc_zone_unusable(cache);
	} else if (cache->length == cache->used) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		btrfs_free_excluded_extents(cache);
	} else if (cache->used == 0) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		add_new_free_space(cache, cache->start,
				   cache->start + cache->length);
		btrfs_free_excluded_extents(cache);
	}

	ret = btrfs_add_block_group_cache(info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		goto error;
	}
	trace_btrfs_add_block_group(info, cache, 0);
	btrfs_update_space_info(info, cache->flags, cache->length,
				cache->used, cache->bytes_super,
				cache->zone_unusable, &space_info);

	cache->space_info = space_info;

	link_block_group(cache);

	set_avail_alloc_bits(info, cache->flags);
	if (btrfs_chunk_readonly(info, cache->start)) {
		inc_block_group_ro(cache, 1);
	} else if (cache->used == 0) {
		ASSERT(list_empty(&cache->bg_list));
		if (btrfs_test_opt(info, DISCARD_ASYNC))
			btrfs_discard_queue_work(&info->discard_ctl, cache);
		else
			btrfs_mark_bg_unused(cache);
	}
	return 0;
error:
	btrfs_put_block_group(cache);
	return ret;
}
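
/*
 * Worked illustration of the caching shortcut above (hypothetical sizes):
 * a 1GiB group with used == 1GiB has nothing free to find, and a group
 * with used == 0 is entirely free, so both can be marked
 * BTRFS_CACHE_FINISHED immediately; only partially used groups pay for
 * the real free space caching work.
 */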

static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct btrfs_space_info *space_info;
	struct rb_node *node;
	int ret = 0;

	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		struct extent_map *em;
		struct map_lookup *map;
		struct btrfs_block_group *bg;

		em = rb_entry(node, struct extent_map, rb_node);
		map = em->map_lookup;
		bg = btrfs_create_block_group_cache(fs_info, em->start);
		if (!bg) {
			ret = -ENOMEM;
			break;
		}

		/* Fill dummy cache as FULL */
		bg->length = em->len;
		bg->flags = map->type;
		bg->last_byte_to_unpin = (u64)-1;
		bg->cached = BTRFS_CACHE_FINISHED;
		bg->used = em->len;
		ret = btrfs_add_block_group_cache(fs_info, bg);
		if (ret) {
			btrfs_remove_free_space_cache(bg);
			btrfs_put_block_group(bg);
			break;
		}
		btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
					0, 0, &space_info);
		bg->space_info = space_info;
		link_block_group(bg);

		set_avail_alloc_bits(fs_info, bg->flags);
	}
	if (!ret)
		btrfs_init_global_block_rsv(fs_info);
	return ret;
}

int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	int need_clear = 0;
	u64 cache_gen;

	if (!info->extent_root)
		return fill_dummy_bgs(info);

	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		struct btrfs_block_group_item bgi;
		struct extent_buffer *leaf;
		int slot;

		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		slot = path->slots[0];

		read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
				   sizeof(bgi));

		btrfs_item_key_to_cpu(leaf, &key, slot);
		btrfs_release_path(path);
		ret = read_one_block_group(info, &bgi, &key, need_clear);
		if (ret < 0)
			goto error;
		key.objectid += key.offset;
		key.offset = 0;
	}
	btrfs_release_path(path);

	list_for_each_entry(space_info, &info->space_info, list) {
		int i;

		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
			if (list_empty(&space_info->block_groups[i]))
				continue;
			cache = list_first_entry(&space_info->block_groups[i],
						 struct btrfs_block_group,
						 list);
			btrfs_sysfs_add_block_group_type(cache);
		}

		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1_MASK |
		       BTRFS_BLOCK_GROUP_RAID56_MASK |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * Avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	btrfs_init_global_block_rsv(info);
	ret = check_chunk_block_group_mappings(info);
error:
	btrfs_free_path(path);
	return ret;
}
|
|
|
|
|
2020-05-05 07:58:22 +08:00
|
|
|
static int insert_block_group_item(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_block_group *block_group)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = trans->fs_info;
|
|
|
|
struct btrfs_block_group_item bgi;
|
|
|
|
struct btrfs_root *root;
|
|
|
|
struct btrfs_key key;
|
|
|
|
|
|
|
|
spin_lock(&block_group->lock);
|
|
|
|
btrfs_set_stack_block_group_used(&bgi, block_group->used);
|
|
|
|
btrfs_set_stack_block_group_chunk_objectid(&bgi,
|
|
|
|
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
|
|
|
|
btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
|
|
|
|
key.objectid = block_group->start;
|
|
|
|
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
|
|
|
|
key.offset = block_group->length;
|
|
|
|
spin_unlock(&block_group->lock);
|
|
|
|
|
|
|
|
root = fs_info->extent_root;
|
|
|
|
return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
|
|
|
|
}
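For reference, a read-back of this item uses the same key convention:
objectid is the block group's start offset, type is
BTRFS_BLOCK_GROUP_ITEM_KEY, and offset is the block group's length. A
minimal sketch of such a lookup, assuming the caller supplies a path
(the helper name is illustrative, not an existing kernel function):

static int lookup_block_group_item(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_key key;

	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	/*
	 * NULL trans, cow = 0: read-only search. Returns 0 if the item
	 * is found, > 0 if there is no exact match, < 0 on error.
	 */
	return btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
}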
|
|
|
|
|
2019-06-21 03:37:57 +08:00
|
|
|
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = trans->fs_info;
|
2019-10-30 02:20:18 +08:00
|
|
|
struct btrfs_block_group *block_group;
|
2019-06-21 03:37:57 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!trans->can_flush_pending_bgs)
|
|
|
|
return;
|
|
|
|
|
|
|
|
while (!list_empty(&trans->new_bgs)) {
|
2020-09-02 05:40:38 +08:00
|
|
|
int index;
|
|
|
|
|
2019-06-21 03:37:57 +08:00
|
|
|
block_group = list_first_entry(&trans->new_bgs,
|
2019-10-30 02:20:18 +08:00
|
|
|
struct btrfs_block_group,
|
2019-06-21 03:37:57 +08:00
|
|
|
bg_list);
|
|
|
|
if (ret)
|
|
|
|
goto next;
|
|
|
|
|
2020-09-02 05:40:38 +08:00
|
|
|
index = btrfs_bg_flags_to_raid_index(block_group->flags);
|
|
|
|
|
2020-05-05 07:58:22 +08:00
|
|
|
ret = insert_block_group_item(trans, block_group);
|
2019-06-21 03:37:57 +08:00
|
|
|
if (ret)
|
|
|
|
btrfs_abort_transaction(trans, ret);
|
2020-05-05 07:58:22 +08:00
|
|
|
ret = btrfs_finish_chunk_alloc(trans, block_group->start,
|
|
|
|
block_group->length);
|
2019-06-21 03:37:57 +08:00
|
|
|
if (ret)
|
|
|
|
btrfs_abort_transaction(trans, ret);
|
|
|
|
add_block_group_free_space(trans, block_group);
|
2020-09-02 05:40:38 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we restriped during balance, we may have added a new raid
|
|
|
|
* type, so now add the sysfs entries when it is safe to do so.
|
|
|
|
* We don't have to worry about locking here as it's handled in
|
|
|
|
* btrfs_sysfs_add_block_group_type.
|
|
|
|
*/
|
|
|
|
if (block_group->space_info->block_group_kobjs[index] == NULL)
|
|
|
|
btrfs_sysfs_add_block_group_type(block_group);
|
|
|
|
|
2019-06-21 03:37:57 +08:00
|
|
|
/* Already aborted the transaction if it failed. */
|
|
|
|
next:
|
|
|
|
btrfs_delayed_refs_rsv_release(fs_info, 1);
|
|
|
|
list_del_init(&block_group->bg_list);
|
|
|
|
}
|
|
|
|
btrfs_trans_release_chunk_metadata(trans);
|
|
|
|
}
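The comment in the loop above defers locking to
btrfs_sysfs_add_block_group_type(). As a rough sketch of the pattern the
commit message describes (simplified; the real helper in sysfs.c also
sets up a NOFS allocation context and handles kobject_add() failure):
the kobject (rkobj below) is allocated and initialized outside the lock,
the per-raid-type slot (index from btrfs_bg_flags_to_raid_index()) is
claimed under space_info->lock, and only the winning thread calls
kobject_add():

	spin_lock(&space_info->lock);
	if (space_info->block_group_kobjs[index]) {
		/* Lost the race: another thread created the directory. */
		spin_unlock(&space_info->lock);
		kobject_put(&rkobj->kobj);
		return;
	}
	space_info->block_group_kobjs[index] = &rkobj->kobj;
	spin_unlock(&space_info->lock);

	kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
		    btrfs_bg_type_to_raid_name(rkobj->flags));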
|
|
|
|
|
|
|
|
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
|
|
|
|
u64 type, u64 chunk_offset, u64 size)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = trans->fs_info;
|
2019-10-30 02:20:18 +08:00
|
|
|
struct btrfs_block_group *cache;
|
2019-06-21 03:37:57 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
btrfs_set_log_full_commit(trans);
|
|
|
|
|
2020-05-05 07:58:20 +08:00
|
|
|
cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
|
2019-06-21 03:37:57 +08:00
|
|
|
if (!cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2020-05-05 07:58:20 +08:00
|
|
|
cache->length = size;
|
2020-08-21 22:54:44 +08:00
|
|
|
set_free_space_tree_thresholds(cache);
|
2019-10-24 00:48:11 +08:00
|
|
|
cache->used = bytes_used;
|
2019-06-21 03:37:57 +08:00
|
|
|
cache->flags = type;
|
|
|
|
cache->last_byte_to_unpin = (u64)-1;
|
|
|
|
cache->cached = BTRFS_CACHE_FINISHED;
|
2020-11-19 07:06:18 +08:00
|
|
|
if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
|
|
|
|
cache->needs_free_space = 1;
|
2021-02-04 18:21:50 +08:00
|
|
|
|
2021-02-04 18:21:51 +08:00
|
|
|
ret = btrfs_load_block_group_zone_info(cache, true);
|
2021-02-04 18:21:50 +08:00
|
|
|
if (ret) {
|
|
|
|
btrfs_put_block_group(cache);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-06-21 03:37:57 +08:00
|
|
|
ret = exclude_super_stripes(cache);
|
|
|
|
if (ret) {
|
|
|
|
/* We may have excluded something, so call this just in case */
|
|
|
|
btrfs_free_excluded_extents(cache);
|
|
|
|
btrfs_put_block_group(cache);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
add_new_free_space(cache, chunk_offset, chunk_offset + size);
|
|
|
|
|
|
|
|
btrfs_free_excluded_extents(cache);
|
|
|
|
|
|
|
|
#ifdef CONFIG_BTRFS_DEBUG
|
|
|
|
if (btrfs_should_fragment_free_space(cache)) {
|
|
|
|
u64 new_bytes_used = size - bytes_used;
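/*
 * Debug-only: account roughly half of the remaining free space as used,
 * matching fragment_free_space() below, which punches artificial holes
 * into the block group's free space to exercise the allocator.
 */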
|
|
|
|
|
|
|
|
bytes_used += new_bytes_used >> 1;
|
2019-06-21 03:38:07 +08:00
|
|
|
fragment_free_space(cache);
|
2019-06-21 03:37:57 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
* Ensure the corresponding space_info object is created and
|
|
|
|
* assigned to our block group. We want our bg to be added to the rbtree
|
|
|
|
* with its ->space_info set.
|
|
|
|
*/
|
|
|
|
cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
|
|
|
|
ASSERT(cache->space_info);
|
|
|
|
|
|
|
|
ret = btrfs_add_block_group_cache(fs_info, cache);
|
|
|
|
if (ret) {
|
|
|
|
btrfs_remove_free_space_cache(cache);
|
|
|
|
btrfs_put_block_group(cache);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now that our block group has its ->space_info set and is inserted in
|
|
|
|
* the rbtree, update the space info's counters.
|
|
|
|
*/
|
|
|
|
trace_btrfs_add_block_group(fs_info, cache, 1);
|
|
|
|
btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
|
2021-02-04 18:21:52 +08:00
|
|
|
cache->bytes_super, 0, &cache->space_info);
|
2019-06-21 03:37:57 +08:00
|
|
|
btrfs_update_global_block_rsv(fs_info);
|
|
|
|
|
|
|
|
link_block_group(cache);
|
|
|
|
|
|
|
|
list_add_tail(&cache->bg_list, &trans->new_bgs);
|
|
|
|
trans->delayed_ref_updates++;
|
|
|
|
btrfs_update_delayed_refs_rsv(trans);
|
|
|
|
|
|
|
|
set_avail_alloc_bits(fs_info, type);
|
|
|
|
return 0;
|
|
|
|
}
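For context, a sketch of the caller side: chunk allocation picks the
chunk's location and size first, then creates the block group covering
it (simplified from the btrfs_alloc_chunk() path in volumes.c; error
handling elided):

	/* bytes_used starts at 0 for a freshly allocated chunk. */
	ret = btrfs_make_block_group(trans, 0, type, chunk_offset, chunk_size);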
|
2019-06-21 03:37:59 +08:00
|
|
|
|
btrfs: scrub: Don't check free space before marking a block group RO
[BUG]
When running btrfs/072 with only one online CPU, it has a pretty high
chance to fail:
btrfs/072 12s ... _check_dmesg: something found in dmesg (see xfstests-dev/results//btrfs/072.dmesg)
- output mismatch (see xfstests-dev/results//btrfs/072.out.bad)
--- tests/btrfs/072.out 2019-10-22 15:18:14.008965340 +0800
+++ /xfstests-dev/results//btrfs/072.out.bad 2019-11-14 15:56:45.877152240 +0800
@@ -1,2 +1,3 @@
QA output created by 072
Silence is golden
+Scrub find errors in "-m dup -d single" test
...
And with the following call trace:
BTRFS info (device dm-5): scrub: started on devid 1
------------[ cut here ]------------
BTRFS: Transaction aborted (error -27)
WARNING: CPU: 0 PID: 55087 at fs/btrfs/block-group.c:1890 btrfs_create_pending_block_groups+0x3e6/0x470 [btrfs]
CPU: 0 PID: 55087 Comm: btrfs Tainted: G W O 5.4.0-rc1-custom+ #13
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_create_pending_block_groups+0x3e6/0x470 [btrfs]
Call Trace:
__btrfs_end_transaction+0xdb/0x310 [btrfs]
btrfs_end_transaction+0x10/0x20 [btrfs]
btrfs_inc_block_group_ro+0x1c9/0x210 [btrfs]
scrub_enumerate_chunks+0x264/0x940 [btrfs]
btrfs_scrub_dev+0x45c/0x8f0 [btrfs]
btrfs_ioctl+0x31a1/0x3fb0 [btrfs]
do_vfs_ioctl+0x636/0xaa0
ksys_ioctl+0x67/0x90
__x64_sys_ioctl+0x43/0x50
do_syscall_64+0x79/0xe0
entry_SYSCALL_64_after_hwframe+0x49/0xbe
---[ end trace 166c865cec7688e7 ]---
[CAUSE]
The error number -27 is -EFBIG, returned from the following call chain:
btrfs_end_transaction()
|- __btrfs_end_transaction()
|- btrfs_create_pending_block_groups()
|- btrfs_finish_chunk_alloc()
|- btrfs_add_system_chunk()
This happens because we have used up all the space of
btrfs_super_block::sys_chunk_array.
The root cause is the following bad loop that creates tons of
system chunks:
1. The only SYSTEM chunk is being scrubbed
   It's very common to have only one SYSTEM chunk.
2. A new SYSTEM bg will be allocated
   btrfs_inc_block_group_ro() checks if we have enough space after
   marking the current bg RO; if not, it allocates a new chunk.
3. The new SYSTEM bg is still empty, so it will be reclaimed
   During the reclaim, we will mark it RO again.
4. That newly allocated empty SYSTEM bg gets scrubbed
   We go back to step 2, as the bg is already marked RO but not
   cleaned up yet.
If the cleaner kthread doesn't get executed fast enough (e.g. only one
CPU), then we will get more and more empty SYSTEM chunks, using up all
the space of btrfs_super_block::sys_chunk_array.
[FIX]
Since scrub/dev-replace doesn't always need to allocate new extents,
especially chunk tree extents, we don't really need to do chunk
pre-allocation.
To break the above spiral, introduce a new parameter to
btrfs_inc_block_group_ro(), @do_chunk_alloc, which indicates whether we
need extra chunk pre-allocation.
For relocation we pass @do_chunk_alloc=true, while for scrub we pass
@do_chunk_alloc=false.
This should keep unnecessary empty chunks from popping up for scrub.
Also, since btrfs_inc_block_group_ro() now takes two parameters, add a
comment documenting them.
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2019-11-15 10:09:00 +08:00
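A sketch of the two calling conventions the commit message above
describes (simplified from the call sites in scrub.c and relocation.c):

	/* Scrub: no chunk pre-allocation; a busy or full group is skipped. */
	ret = btrfs_inc_block_group_ro(cache, false);

	/* Relocation: pre-allocate so relocated extents have a new home. */
	ret = btrfs_inc_block_group_ro(rc->block_group, true);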
|
|
|
/*
|
|
|
|
* Mark one block group RO; can be called several times for the same block
|
|
|
|
* group.
|
|
|
|
*
|
|
|
|
* @cache: the destination block group
|
|
|
|
* @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to
|
|
|
|
* ensure we still have some free space after marking this
|
|
|
|
* block group RO.
|
|
|
|
*/
|
|
|
|
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
|
|
|
|
bool do_chunk_alloc)
|
2019-06-21 03:37:59 +08:00
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = cache->fs_info;
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
u64 alloc_flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
again:
|
|
|
|
trans = btrfs_join_transaction(fs_info->extent_root);
|
|
|
|
if (IS_ERR(trans))
|
|
|
|
return PTR_ERR(trans);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We're not allowed to set block groups readonly after the dirty
|
|
|
|
* block group cache has started writing. If it already started,
|
|
|
|
* back off and let this transaction commit.
|
|
|
|
*/
|
|
|
|
mutex_lock(&fs_info->ro_block_group_mutex);
|
|
|
|
if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
|
|
|
|
u64 transid = trans->transid;
|
|
|
|
|
|
|
|
mutex_unlock(&fs_info->ro_block_group_mutex);
|
|
|
|
btrfs_end_transaction(trans);
|
|
|
|
|
|
|
|
ret = btrfs_wait_for_commit(fs_info, transid);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
2019-11-15 10:09:00 +08:00
|
|
|
if (do_chunk_alloc) {
|
2019-06-21 03:37:59 +08:00
|
|
|
/*
|
2019-11-15 10:09:00 +08:00
|
|
|
* If we are changing raid levels, try to allocate a
|
|
|
|
* corresponding block group with the new raid level.
|
2019-06-21 03:37:59 +08:00
|
|
|
*/
|
btrfs: don't adjust bg flags and use default allocation profiles
btrfs/061 has been failing consistently for me recently with a
transaction abort. We run out of space in the system chunk array, which
means we've allocated way more system chunks than we need.
Chris added this a long time ago for balance as a poor man's restriping.
If you had a single disk and then added another disk and then did a
balance, update_block_group_flags would then figure out which RAID level
you needed.
Fast forward to today and we have restriping behavior, so we can
explicitly tell the fs that we're trying to change the raid level. This
is accomplished through the normal get_alloc_profile path.
Furthermore this code actually causes btrfs/061 to fail, because we do
things like mkfs -m dup -d single with multiple devices. This trips
this check
alloc_flags = update_block_group_flags(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
in btrfs_inc_block_group_ro. Because we're balancing and scrubbing, but
not actually restriping, we keep forcing chunk allocation of RAID1
chunks. This eventually causes us to run out of system space and the
file system aborts and flips read only.
We don't need this poor man's restriping any more; simply use the normal
get_alloc_profile helper, which will get the correct alloc_flags and
thus make the right decision for chunk allocation. This keeps us from
allocating a billion system chunks and falling over.
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-07-21 22:48:45 +08:00
|
|
|
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
|
2019-11-15 10:09:00 +08:00
|
|
|
if (alloc_flags != cache->flags) {
|
|
|
|
ret = btrfs_chunk_alloc(trans, alloc_flags,
|
|
|
|
CHUNK_ALLOC_FORCE);
|
|
|
|
/*
|
|
|
|
* ENOSPC is allowed here; we may have enough space
|
|
|
|
* already allocated at the new raid level to carry on
|
|
|
|
*/
|
|
|
|
if (ret == -ENOSPC)
|
|
|
|
ret = 0;
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
}
|
2019-06-21 03:37:59 +08:00
|
|
|
}
|
|
|
|
|
2020-01-17 22:07:38 +08:00
|
|
|
ret = inc_block_group_ro(cache, 0);
|
btrfs: fix race between writes to swap files and scrub
When we activate a swap file, at btrfs_swap_activate(), we acquire the
exclusive operation lock to prevent the physical location of the swap
file extents from being changed by operations such as balance and device
replace/resize/remove. We also call can_nocow_extent() there which,
among other things, checks if the block group of a swap file extent is
currently RO, and if it is we cannot use the extent, since a write
into it would result in COWing the extent.
However we have no protection against a scrub operation running after we
activate the swap file, which can result in the swap file extents being
COWed while the scrub is running and operating on the respective block
group, because scrub turns a block group RO before it processes it
and then back to RW mode after processing it. That means an attempt
to write into a swap file extent while scrub is processing the respective
block group will result in COWing the extent, changing its physical
location on disk.
Fix this by making sure that block groups that have extents that are used
by active swap files cannot be turned to RO mode, making it
impossible for a scrub to turn them RO. When a scrub finds a
block group that cannot be turned RO due to extents used by swap
files, it proceeds to the next block group and logs a warning
message mentioning that the block group was skipped due to active swap
files - the same approach we currently use for balance.
Fixes: ed46ff3d42378 ("Btrfs: support swap files")
CC: stable@vger.kernel.org # 5.4+
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-02-05 20:55:37 +08:00
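A sketch of the mechanism the commit message above describes, as applied
inside the static inc_block_group_ro() helper (simplified; the
swap_extents counter tracks active swap file extents in the block
group):

	if (cache->swap_extents) {
		/* Backing an active swap file: refuse to go read-only. */
		ret = -ETXTBSY;
		goto out;
	}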
|
|
|
if (!do_chunk_alloc || ret == -ETXTBSY)
|
2019-11-15 10:09:00 +08:00
|
|
|
goto unlock_out;
|
2019-06-21 03:37:59 +08:00
|
|
|
if (!ret)
|
|
|
|
goto out;
|
|
|
|
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
|
|
|
|
ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2019-06-21 03:38:07 +08:00
|
|
|
ret = inc_block_group_ro(cache, 0);
|
2021-02-05 20:55:37 +08:00
|
|
|
if (ret == -ETXTBSY)
|
|
|
|
goto unlock_out;
|
2019-06-21 03:37:59 +08:00
|
|
|
out:
|
|
|
|
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
|
2020-07-21 22:48:45 +08:00
|
|
|
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
|
2019-06-21 03:37:59 +08:00
|
|
|
mutex_lock(&fs_info->chunk_mutex);
|
|
|
|
check_system_chunk(trans, alloc_flags);
|
|
|
|
mutex_unlock(&fs_info->chunk_mutex);
|
|
|
|
}
|
2019-11-15 10:09:00 +08:00
|
|
|
unlock_out:
|
2019-06-21 03:37:59 +08:00
|
|
|
mutex_unlock(&fs_info->ro_block_group_mutex);
|
|
|
|
|
|
|
|
btrfs_end_transaction(trans);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-10-30 02:20:18 +08:00
|
|
|
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
|
2019-06-21 03:37:59 +08:00
|
|
|
{
|
|
|
|
struct btrfs_space_info *sinfo = cache->space_info;
|
|
|
|
u64 num_bytes;
|
|
|
|
|
|
|
|
BUG_ON(!cache->ro);
|
|
|
|
|
|
|
|
spin_lock(&sinfo->lock);
|
|
|
|
spin_lock(&cache->lock);
|
|
|
|
if (!--cache->ro) {
|
2019-10-24 00:48:22 +08:00
|
|
|
num_bytes = cache->length - cache->reserved -
|
2021-02-04 18:21:52 +08:00
|
|
|
cache->pinned - cache->bytes_super -
|
|
|
|
cache->zone_unusable - cache->used;
|
2019-06-21 03:37:59 +08:00
|
|
|
sinfo->bytes_readonly -= num_bytes;
|
2021-02-04 18:21:52 +08:00
|
|
|
if (btrfs_is_zoned(cache->fs_info)) {
|
|
|
|
/* Migrate zone_unusable bytes back */
|
|
|
|
cache->zone_unusable = cache->alloc_offset - cache->used;
|
|
|
|
sinfo->bytes_zone_unusable += cache->zone_unusable;
|
|
|
|
sinfo->bytes_readonly -= cache->zone_unusable;
|
|
|
|
}
|
2019-06-21 03:37:59 +08:00
|
|
|
list_del_init(&cache->ro_list);
|
|
|
|
}
|
|
|
|
spin_unlock(&cache->lock);
|
|
|
|
spin_unlock(&sinfo->lock);
|
|
|
|
}
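A worked example of the accounting above, with illustrative numbers (not
taken from a real filesystem): for a 1024 MiB block group with used =
600 MiB, reserved = 10 MiB, pinned = 20 MiB, bytes_super = 2 MiB and
zone_unusable = 0, dropping the last RO reference computes

	num_bytes = 1024 - 10 - 20 - 2 - 0 - 600 = 392 MiB

which is subtracted from sinfo->bytes_readonly, making that much free
space allocatable again.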
|
2019-06-21 03:38:00 +08:00
|
|
|
|
2020-05-05 07:58:23 +08:00
|
|
|
static int update_block_group_item(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_path *path,
|
|
|
|
struct btrfs_block_group *cache)
|
2019-06-21 03:38:00 +08:00
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = trans->fs_info;
|
|
|
|
int ret;
|
2020-05-05 07:58:23 +08:00
|
|
|
struct btrfs_root *root = fs_info->extent_root;
|
2019-06-21 03:38:00 +08:00
|
|
|
unsigned long bi;
|
|
|
|
struct extent_buffer *leaf;
|
2019-10-24 00:48:11 +08:00
|
|
|
struct btrfs_block_group_item bgi;
|
2019-10-24 00:48:22 +08:00
|
|
|
struct btrfs_key key;
|
|
|
|
|
|
|
|
key.objectid = cache->start;
|
|
|
|
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
|
|
|
|
key.offset = cache->length;
|
2019-06-21 03:38:00 +08:00
|
|
|
|
2020-05-05 07:58:23 +08:00
|
|
|
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
|
2019-06-21 03:38:00 +08:00
|
|
|
if (ret) {
|
|
|
|
if (ret > 0)
|
|
|
|
ret = -ENOENT;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
|
2019-10-24 00:48:18 +08:00
|
|
|
btrfs_set_stack_block_group_used(&bgi, cache->used);
|
|
|
|
btrfs_set_stack_block_group_chunk_objectid(&bgi,
|
2019-10-24 00:48:15 +08:00
|
|
|
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
|
2019-10-24 00:48:18 +08:00
|
|
|
btrfs_set_stack_block_group_flags(&bgi, cache->flags);
|
2019-10-24 00:48:11 +08:00
|
|
|
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
|
2019-06-21 03:38:00 +08:00
|
|
|
btrfs_mark_buffer_dirty(leaf);
|
|
|
|
fail:
|
|
|
|
btrfs_release_path(path);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2019-10-30 02:20:18 +08:00
|
|
|
static int cache_save_setup(struct btrfs_block_group *block_group,
|
2019-06-21 03:38:00 +08:00
|
|
|
struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_path *path)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
|
|
|
struct btrfs_root *root = fs_info->tree_root;
|
|
|
|
struct inode *inode = NULL;
|
|
|
|
struct extent_changeset *data_reserved = NULL;
|
|
|
|
u64 alloc_hint = 0;
|
|
|
|
int dcs = BTRFS_DC_ERROR;
|
|
|
|
u64 num_pages = 0;
|
|
|
|
int retries = 0;
|
|
|
|
int ret = 0;
|
|
|
|
|
2020-11-19 07:06:26 +08:00
|
|
|
if (!btrfs_test_opt(fs_info, SPACE_CACHE))
|
|
|
|
return 0;
|
|
|
|
|
2019-06-21 03:38:00 +08:00
|
|
|
/*
|
|
|
|
* If this block group is smaller than 100 MiB, don't bother caching the
|
|
|
|
* block group.
|
|
|
|
*/
|
2019-10-24 00:48:22 +08:00
|
|
|
if (block_group->length < (100 * SZ_1M)) {
|
2019-06-21 03:38:00 +08:00
|
|
|
spin_lock(&block_group->lock);
|
|
|
|
block_group->disk_cache_state = BTRFS_DC_WRITTEN;
|
|
|
|
spin_unlock(&block_group->lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-06 00:34:34 +08:00
|
|
|
if (TRANS_ABORTED(trans))
|
2019-06-21 03:38:00 +08:00
|
|
|
return 0;
|
|
|
|
again:
|
|
|
|
inode = lookup_free_space_inode(block_group, path);
|
|
|
|
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
|
|
|
|
ret = PTR_ERR(inode);
|
|
|
|
btrfs_release_path(path);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_ERR(inode)) {
|
|
|
|
BUG_ON(retries);
|
|
|
|
retries++;
|
|
|
|
|
|
|
|
if (block_group->ro)
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
ret = create_free_space_inode(trans, block_group, path);
|
|
|
|
if (ret)
|
|
|
|
goto out_free;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We want to set the generation to 0, that way if anything goes wrong
|
|
|
|
* from here on out we know not to trust this cache when we load up next
|
|
|
|
* time.
|
|
|
|
*/
|
|
|
|
BTRFS_I(inode)->generation = 0;
|
2020-11-02 22:48:59 +08:00
|
|
|
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
|
2019-06-21 03:38:00 +08:00
|
|
|
if (ret) {
|
|
|
|
/*
|
|
|
|
* So theoretically we could recover from this, simply set the
|
|
|
|
* super cache generation to 0 so we know to invalidate the
|
|
|
|
* cache, but then we'd have to keep track of the block groups
|
|
|
|
* that fail this way so we know we _have_ to reset this cache
|
|
|
|
* before the next commit or risk reading stale cache. So to
|
|
|
|
* limit our exposure to horrible edge cases lets just abort the
|
|
|
|
* transaction, this only happens in really bad situations
|
|
|
|
* anyway.
|
|
|
|
*/
|
|
|
|
btrfs_abort_transaction(trans, ret);
|
|
|
|
goto out_put;
|
|
|
|
}
|
|
|
|
WARN_ON(ret);
|
|
|
|
|
|
|
|
/* We've already setup this transaction, go ahead and exit */
|
|
|
|
if (block_group->cache_generation == trans->transid &&
|
|
|
|
i_size_read(inode)) {
|
|
|
|
dcs = BTRFS_DC_SETUP;
|
|
|
|
goto out_put;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i_size_read(inode) > 0) {
|
|
|
|
ret = btrfs_check_trunc_cache_free_space(fs_info,
|
|
|
|
&fs_info->global_block_rsv);
|
|
|
|
if (ret)
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
|
|
|
|
if (ret)
|
|
|
|
goto out_put;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock(&block_group->lock);
|
|
|
|
if (block_group->cached != BTRFS_CACHE_FINISHED ||
|
|
|
|
!btrfs_test_opt(fs_info, SPACE_CACHE)) {
|
|
|
|
/*
|
|
|
|
* don't bother trying to write stuff out _if_
|
|
|
|
* a) we're not cached,
|
|
|
|
* b) we're with nospace_cache mount option,
|
|
|
|
* c) we're with v2 space_cache (FREE_SPACE_TREE).
|
|
|
|
*/
|
|
|
|
dcs = BTRFS_DC_WRITTEN;
|
|
|
|
spin_unlock(&block_group->lock);
|
|
|
|
goto out_put;
|
|
|
|
}
|
|
|
|
spin_unlock(&block_group->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We hit an ENOSPC when setting up the cache in this transaction, just
|
|
|
|
* skip doing the setup, we've already cleared the cache so we're safe.
|
|
|
|
*/
|
|
|
|
if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
|
|
|
|
ret = -ENOSPC;
|
|
|
|
goto out_put;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to preallocate enough space based on how big the block group is.
|
|
|
|
* Keep in mind this has to include any pinned space which could end up
|
|
|
|
* taking up quite a bit since it's not folded into the other space
|
|
|
|
* cache.
|
|
|
|
*/
|
2019-10-24 00:48:22 +08:00
|
|
|
num_pages = div_u64(block_group->length, SZ_256M);
|
2019-06-21 03:38:00 +08:00
|
|
|
if (!num_pages)
|
|
|
|
num_pages = 1;
|
|
|
|
|
|
|
|
num_pages *= 16;
|
|
|
|
num_pages *= PAGE_SIZE;
|
|
|
|
|
2020-06-03 13:55:41 +08:00
|
|
|
ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
|
|
|
|
num_pages);
|
2019-06-21 03:38:00 +08:00
|
|
|
if (ret)
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
|
|
|
|
num_pages, num_pages,
|
|
|
|
&alloc_hint);
|
|
|
|
/*
|
|
|
|
* Our cache requires contiguous chunks so that we don't modify a bunch
|
|
|
|
* of metadata or split extents when writing the cache out, which means
|
|
|
|
* we can enospc if we are heavily fragmented in addition to just normal
|
|
|
|
* out of space conditions. So if we hit this just skip setting up any
|
|
|
|
* other block groups for this transaction, maybe we'll unpin enough
|
|
|
|
* space the next time around.
|
|
|
|
*/
|
|
|
|
if (!ret)
|
|
|
|
dcs = BTRFS_DC_SETUP;
|
|
|
|
else if (ret == -ENOSPC)
|
|
|
|
set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
|
|
|
|
|
|
|
|
out_put:
|
|
|
|
iput(inode);
|
|
|
|
out_free:
|
|
|
|
btrfs_release_path(path);
|
|
|
|
out:
|
|
|
|
spin_lock(&block_group->lock);
|
|
|
|
if (!ret && dcs == BTRFS_DC_SETUP)
|
|
|
|
block_group->cache_generation = trans->transid;
|
|
|
|
block_group->disk_cache_state = dcs;
|
|
|
|
spin_unlock(&block_group->lock);
|
|
|
|
|
|
|
|
extent_changeset_free(data_reserved);
|
|
|
|
return ret;
|
|
|
|
}
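
/*
 * Run cache_save_setup() for every dirty block group whose free space cache
 * still needs to be initialized in this transaction.
 */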
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}

/*
 * Transaction commit does final block group cache writeback during a critical
 * section where nothing is allowed to change the FS. This is required in
 * order for the cache to actually match the block group, but can introduce a
 * lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
 * There's a chance we'll have to redo some of it if the block group changes
 * again during the commit, but it greatly reduces the commit latency by
 * getting rid of the easy block groups while we're still allowing others to
 * join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

again:
	/* Make sure all the block groups on our dirty list actually exist */
	btrfs_create_pending_block_groups(trans);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		bool drop_reserve = true;

		cache = list_first_entry(&dirty, struct btrfs_block_group,
					 dirty_list);
		/*
		 * This can happen if something re-dirties a block group that
		 * is already under IO. Just wait for it to finish and then do
		 * it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
		 * it should update the cache_state. Don't delete until after
		 * we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;

				/*
				 * The cache_write_mutex is protecting the
				 * io_list, also refer to the definition of
				 * btrfs_transaction::io_bgs for more details
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
					drop_reserve = false;
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		if (drop_reserve)
			btrfs_delayed_refs_rsv_release(fs_info, 1);
		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		if (ret)
			goto out;
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * Go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	if (!ret)
		ret = btrfs_run_delayed_refs(trans, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	}
out:
	if (ret < 0) {
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&dirty, &cur_trans->dirty_bgs);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	}

	btrfs_free_path(path);
	return ret;
}
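
/*
 * Write out the remaining dirty block groups during the critical section of
 * the transaction commit, after btrfs_start_dirty_block_groups() has done
 * the bulk of the work.
 */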
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction commit,
	 * we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		/*
		 * This can happen if cache_save_setup re-dirties a block group
		 * that is already under IO. Just wait for it to finish and
		 * then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * Don't remove from the dirty list until after we've waited on
		 * any pending IO
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans,
						     (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc). If this is the case, wait for all free
			 * space endio workers to finish and retry. This is a
			 * very rare case so no need for a more efficient and
			 * complex approach.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = update_block_group_item(trans, path, cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking
	 */
	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(trans, cache, path);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}
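
/*
 * Update the used byte accounting of the block groups covering the range
 * [bytenr, bytenr + num_bytes), both in memory and in the superblock, when
 * an extent is allocated (@alloc != 0) or freed.
 */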
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_block_group *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;
	int ret = 0;

	/* Block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		factor = btrfs_bg_type_to_factor(cache->flags);

		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && !btrfs_block_group_done(cache))
			btrfs_cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->start;
		WARN_ON(byte_in_group > cache->length);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = cache->used;
		num_bytes = min(total, cache->length - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->used = old_val;
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			cache->used = old_val;
			cache->pinned += num_bytes;
			btrfs_space_info_update_bytes_pinned(info,
					cache->space_info, num_bytes);
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			__btrfs_mod_total_bytes_pinned(cache->space_info,
						       num_bytes);
			set_extent_dirty(&trans->transaction->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->delayed_ref_updates++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			if (!btrfs_test_opt(info, DISCARD_ASYNC))
				btrfs_mark_bg_unused(cache);
		}

		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}

	/* Modified block groups are accounted for in the delayed_refs_rsv. */
	btrfs_update_delayed_refs_rsv(trans);
	return ret;
}

/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content, which is the same as
 *		@num_bytes except for the compression path
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;
		trace_btrfs_space_reservation(cache->fs_info, "space_info",
					      space_info->flags, num_bytes, 1);
		btrfs_space_info_update_bytes_may_use(cache->fs_info,
						      space_info, -ram_bytes);
		if (delalloc)
			cache->delalloc_bytes += num_bytes;

		/*
		 * Compression can use less space than we reserved, so wake
		 * tickets if that happens
		 */
		if (num_bytes < ram_bytes)
			btrfs_try_granting_tickets(cache->fs_info, space_info);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk. For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);

	btrfs_try_granting_tickets(cache->fs_info, space_info);
	spin_unlock(&space_info->lock);
}
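
/*
 * Force the next metadata chunk allocation by marking every metadata space
 * info with CHUNK_ALLOC_FORCE.
 */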
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
}
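
/*
 * Decide whether a new chunk is needed for @sinfo: always for
 * CHUNK_ALLOC_FORCE, when free space drops below ~1% of the filesystem size
 * for CHUNK_ALLOC_LIMITED, and otherwise once the space info is more than
 * roughly 80% used.
 */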
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);

	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}

/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, flags);

	ret = btrfs_alloc_chunk(trans, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
		btrfs_create_pending_block_groups(trans);

	return ret;
}
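
/*
 * Return the number of devices a chunk of the given @type would span: the
 * profile's devs_max if set, otherwise every writable device.
 */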
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
	if (!num_dev)
		num_dev = fs_info->fs_devices->rw_devices;

	return num_dev;
}

/*
 * Reserve space in the system space for allocating or removing a chunk
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}
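
/*
 * Drop the inode references that block groups hold on their v1 free space
 * cache inodes, typically during unmount.
 */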
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			btrfs_wait_block_group_cache_done(block_group);
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = btrfs_next_block_group(block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->start + block_group->length;
		btrfs_put_block_group(block_group);
	}
}

/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	spin_lock(&info->block_group_cache_lock);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	spin_unlock(&info->block_group_cache_lock);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(refcount_read(&block_group->refs) == 1);
		ASSERT(block_group->swap_extents == 0);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);
		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}
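
/*
 * Hold off the final removal of a deleted block group's extent map while
 * other tasks (e.g. trimming) are still using it; the last unfreeze of a
 * removed block group performs the cleanup.
 */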
void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}

void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->start,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We may have left one free space entry and other possible
		 * tasks trimming this block group have left 1 entry each one.
		 * Free them if any.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}
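
/*
 * Track extents handed out to active swap files so the block group cannot be
 * set read-only (e.g. by scrub or balance) while a swap file could write into
 * it; returns false if the group is already read-only.
 */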
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}

void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}