// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES	4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV	((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL	((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock:    0B (zone 0)
 * - first copy:          512G (zone starting at that offset)
 * - second copy:           4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
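
/*
 * With the offsets above, BTRFS_SB_LOG_FIRST_SHIFT is 39 (512GiB == 2^39) and
 * BTRFS_SB_LOG_SECOND_SHIFT is 42 (4TiB == 2^42). sb_zone_number() below
 * converts these byte shifts into zone numbers for a given zone size.
 */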

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES	2

/*
 * Minimum of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES	(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Maximum supported zone size. Currently, SMR disks have a zone size of
 * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
 * expect the zone size to become larger than 8GiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
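
/*
 * A superblock log zone can take no more superblock copies once it is in the
 * FULL condition or has fewer than SUPER_INFO_SECTORS left between its write
 * pointer and the end of its usable capacity.
 */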
static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}
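
/*
 * Callback for blkdev_report_zones(): copy each reported zone into the
 * caller's array.
 */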
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}
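
/*
 * Pick the current position in a superblock log zone pair based on the zones'
 * write pointers and conditions. *wp_ret is set to a byte offset on success;
 * -ENOENT means both zones are still empty, i.e. no superblock has been
 * written yet.
 */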
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;
	int i;

	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          x        0
	 * In use[1]        0          x        0
	 * Full[1]          1          1        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (super[0]->generation > super[1]->generation)
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}

/*
 * Get the first zone number of the superblock mirror
 */
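/*
 * For example, with 256MiB zones (zone_size_shift == 28), mirror 1 maps to
 * zone 1ULL << (39 - 28) == 2048 (2048 * 256MiB == 512GiB) and mirror 2 to
 * zone 1ULL << (42 - 28) == 16384 (16384 * 256MiB == 4TiB).
 */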
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}
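
/* Convert a zone number into its starting sector on the block device. */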
static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}
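
/* Convert a zone number into its starting byte offset on the device. */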
static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
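/*
 * For example, a 100GiB non-zoned device with a 256MiB emulated zone size is
 * presented as 400 conventional zones.
 */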
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}
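
/*
 * Report zones starting at byte offset @pos. On success, *nr_zones is updated
 * to the number of zones actually reported; non-zoned devices fall back to
 * the emulated conventional zones above.
 */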
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	return 0;
}

/* The emulated zone size is determined from the size of a device extent. */
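/*
 * The lookup below finds the first dev extent item of devid 1 and uses that
 * extent's length as the emulated zone size.
 */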
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}
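
/* Collect zone information for all present devices of a ZONED filesystem. */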
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
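
/*
 * Read the zone layout of a single device: zone size and count, bitmaps of
 * sequential/empty/active zones, and the superblock log zones.
 */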
int btrfs_get_dev_zone_info(struct btrfs_device *device)
|
|
|
|
{
|
2021-02-04 18:21:47 +08:00
|
|
|
struct btrfs_fs_info *fs_info = device->fs_info;
|
2020-11-10 19:26:07 +08:00
|
|
|
struct btrfs_zoned_device_info *zone_info = NULL;
|
|
|
|
struct block_device *bdev = device->bdev;
|
2021-08-19 20:19:15 +08:00
|
|
|
struct request_queue *queue = bdev_get_queue(bdev);
|
|
|
|
unsigned int max_active_zones;
|
|
|
|
unsigned int nactive;
|
2020-11-10 19:26:07 +08:00
|
|
|
sector_t nr_sectors;
|
|
|
|
sector_t sector = 0;
|
|
|
|
struct blk_zone *zones = NULL;
|
|
|
|
unsigned int i, nreported = 0, nr_zones;
|
2021-03-03 16:55:46 +08:00
|
|
|
sector_t zone_sectors;
|
2021-02-04 18:21:47 +08:00
|
|
|
char *model, *emulated;
|
2020-11-10 19:26:07 +08:00
|
|
|
int ret;
|
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
/*
|
|
|
|
* Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
|
|
|
|
* yet be set.
|
|
|
|
*/
|
|
|
|
if (!btrfs_fs_incompat(fs_info, ZONED))
|
2020-11-10 19:26:07 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (device->zone_info)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
|
|
|
|
if (!zone_info)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
if (!bdev_is_zoned(bdev)) {
|
|
|
|
if (!fs_info->zone_size) {
|
|
|
|
ret = calculate_emulated_zone_size(fs_info);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(fs_info->zone_size);
|
|
|
|
zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
|
|
|
|
} else {
|
|
|
|
zone_sectors = bdev_zone_sectors(bdev);
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
/* Check if it's power of 2 (see is_power_of_2) */
|
|
|
|
ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
|
|
|
|
zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
|
|
|
|
/* We reject devices with a zone size larger than 8GB */
|
|
|
|
if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
|
|
|
|
btrfs_err_in_rcu(fs_info,
|
|
|
|
"zoned: %s: zone size %llu larger than supported maximum %llu",
|
|
|
|
rcu_str_deref(device->name),
|
|
|
|
zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
nr_sectors = bdev_nr_sectors(bdev);
|
2020-11-10 19:26:07 +08:00
|
|
|
zone_info->zone_size_shift = ilog2(zone_info->zone_size);
|
|
|
|
zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
|
|
|
|
if (!IS_ALIGNED(nr_sectors, zone_sectors))
|
|
|
|
zone_info->nr_zones++;
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
max_active_zones = queue_max_active_zones(queue);
|
|
|
|
if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
|
|
|
|
btrfs_err_in_rcu(fs_info,
|
|
|
|
"zoned: %s: max active zones %u is too small, need at least %u active zones",
|
|
|
|
rcu_str_deref(device->name), max_active_zones,
|
|
|
|
BTRFS_MIN_ACTIVE_ZONES);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
zone_info->max_active_zones = max_active_zones;
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->seq_zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->empty_zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->active_zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
|
|
|
|
if (!zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get zones type */
|
2021-08-19 20:19:15 +08:00
|
|
|
nactive = 0;
|
2020-11-10 19:26:07 +08:00
|
|
|
while (sector < nr_sectors) {
|
|
|
|
nr_zones = BTRFS_REPORT_NR_ZONES;
|
|
|
|
ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
|
|
|
|
&nr_zones);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_zones; i++) {
|
|
|
|
if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
|
|
|
|
__set_bit(nreported, zone_info->seq_zones);
|
2021-08-19 20:19:15 +08:00
|
|
|
switch (zones[i].cond) {
|
|
|
|
case BLK_ZONE_COND_EMPTY:
|
2020-11-10 19:26:07 +08:00
|
|
|
__set_bit(nreported, zone_info->empty_zones);
|
2021-08-19 20:19:15 +08:00
|
|
|
break;
|
|
|
|
case BLK_ZONE_COND_IMP_OPEN:
|
|
|
|
case BLK_ZONE_COND_EXP_OPEN:
|
|
|
|
case BLK_ZONE_COND_CLOSED:
|
|
|
|
__set_bit(nreported, zone_info->active_zones);
|
|
|
|
nactive++;
|
|
|
|
break;
|
|
|
|
}
|
2020-11-10 19:26:07 +08:00
|
|
|
nreported++;
|
|
|
|
}
|
|
|
|
sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nreported != zone_info->nr_zones) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"inconsistent number of zones on %s (%u/%u)",
|
|
|
|
rcu_str_deref(device->name), nreported,
|
|
|
|
zone_info->nr_zones);
|
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
if (max_active_zones) {
|
|
|
|
if (nactive > max_active_zones) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"zoned: %u active zones on %s exceeds max_active_zones %u",
|
|
|
|
nactive, rcu_str_deref(device->name),
|
|
|
|
max_active_zones);
|
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
atomic_set(&zone_info->active_zones_left,
|
|
|
|
max_active_zones - nactive);
|
|
|
|
}
|
|
|
|
|
	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kfree(zones);

	device->zone_info = zone_info;

	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kfree(zones);
out_free_zone_info:
	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->seq_zones);
	kfree(zone_info);
	device->zone_info = NULL;

	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	kfree(zone_info);
	device->zone_info = NULL;
}

int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

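/*
 * Verify that the zoned setup is usable at mount time: either no device is
 * zoned and the ZONED incompat flag is clear, or every device is treated as
 * zoned, all devices share one zone size, that size is aligned to
 * BTRFS_STRIPE_LEN, mixed block groups are not enabled, and the mount
 * options are compatible with zoned mode.
 */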
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 zoned_devices = 0;
	u64 nr_devices = 0;
	u64 zone_size = 0;
	const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
	int ret = 0;

	/* Count zoned devices */
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		enum blk_zoned_model model;

		if (!device->bdev)
			continue;

		model = bdev_zoned_model(device->bdev);
		/*
		 * A Host-Managed zoned device must be used as a zoned device.
		 * A Host-Aware zoned device and a non-zoned device can be
		 * treated as a zoned device, if the ZONED flag is enabled in
		 * the superblock.
		 */
		if (model == BLK_ZONED_HM ||
		    (model == BLK_ZONED_HA && incompat_zoned) ||
		    (model == BLK_ZONED_NONE && incompat_zoned)) {
			struct btrfs_zoned_device_info *zone_info =
				device->zone_info;

			zoned_devices++;
			if (!zone_size) {
				zone_size = zone_info->zone_size;
			} else if (zone_info->zone_size != zone_size) {
				btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
					  device->zone_info->zone_size,
					  zone_size);
				ret = -EINVAL;
				goto out;
			}
		}
		nr_devices++;
	}

	if (!zoned_devices && !incompat_zoned)
		goto out;

	if (!zoned_devices && incompat_zoned) {
		/* No zoned block device found on ZONED filesystem */
		btrfs_err(fs_info,
			  "zoned: no zoned devices found on a zoned filesystem");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices && !incompat_zoned) {
		btrfs_err(fs_info,
			  "zoned: mode not enabled but zoned device found");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices != nr_devices) {
		btrfs_err(fs_info,
			  "zoned: cannot mix zoned and regular devices");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		ret = -EINVAL;
		goto out;
	}

	fs_info->zone_size = zone_size;
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info);
	if (ret)
		goto out;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
	return ret;
}

int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_test_opt(info, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_test_opt(info, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	return 0;
}

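/*
 * Find the superblock location inside a pair of log zones. For a
 * conventional zone the superblock lives at the zone start. Otherwise the
 * write pointer of the two-zone log decides: for WRITE, return the next
 * position to append (resetting a wrapped-around full zone before it is
 * reused); for READ, step back one BTRFS_SUPER_INFO_SIZE from the write
 * pointer to get the latest copy.
 */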
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(sb_zone_is_full(reset));

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}

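/*
 * Same job as btrfs_sb_log_location(), but working directly on a block
 * device: the mirror's two log zones are reported from the device instead
 * of taken from cached zone info. Non-zoned devices fall back to the
 * regular fixed superblock offsets.
 */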
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

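/*
 * Advance the cached write pointer of the superblock log after a superblock
 * has been written. When a log zone runs out of room it is transitioned to
 * FULL (issuing an explicit ZONE_FINISH if the write pointer stops short of
 * the zone capacity) so that the next write moves on to the other zone.
 */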
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				int ret;

				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len, GFP_NOFS);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}

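/*
 * Reset both log zones of the given superblock mirror, wiping the logged
 * superblock copies on that device.
 */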
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				zone_start_sector(sb_zone, bdev),
				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}

/**
 * btrfs_find_allocatable_zones - find allocatable zones within a given region
 *
 * @device:	the device to allocate a region on
 * @hole_start:	the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}

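/*
 * Account the zone containing @pos as active. Returns false when the device
 * enforces a max_active_zones limit and no active-zone budget is left. If
 * another task sets the bit first, the reservation taken here is handed
 * back.
 */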
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}

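/*
 * Reset the device zones covering [physical, physical + length) and mark
 * them empty and inactive in the per-device bitmaps. On success, *bytes is
 * set to the number of bytes made available again.
 */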
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
|
|
|
|
u64 length, u64 *bytes)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
*bytes = 0;
|
|
|
|
ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
|
|
|
|
physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
|
|
|
|
GFP_NOFS);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
*bytes = length;
|
|
|
|
while (length) {
|
|
|
|
btrfs_dev_set_zone_empty(device, physical);
|
2021-08-19 20:19:17 +08:00
|
|
|
btrfs_dev_clear_active_zone(device, physical);
|
2021-02-04 18:21:48 +08:00
|
|
|
physical += device->zone_info->zone_size;
|
|
|
|
length -= device->zone_info->zone_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
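
/*
 * Verify that every sequential zone in [@start, @start + @size) is empty, and
 * reset any zone that unexpectedly is not. Free space handed to the allocator
 * is supposed to be empty already, so a reset here is reported with a warning.
 */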
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long end = (start + size) >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (end > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
		return 0;

	/* All the zones are sequential and empty */
	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as the allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
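
/*
 * Load the zone related information (write pointers, zone capacity, active
 * state) for a block group and derive its allocation offset from the
 * underlying device zones.
 */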
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	u64 physical = 0;
	int ret;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 *caps = NULL;
	unsigned long *active = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	cache->physical_map = kmalloc(map_lookup_size(map->num_stripes), GFP_NOFS);
	if (!cache->physical_map) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(cache->physical_map, map, map_lookup_size(map->num_stripes));

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		ret = -ENOMEM;
		goto out;
	}

	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
	if (!caps) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical, &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
			btrfs_err_in_rcu(fs_info,
	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				zone.start << SECTOR_SHIFT,
				rcu_str_deref(device->name), device->devid);
			ret = -EIO;
			goto out;
		}

		caps[i] = (zone.capacity << SECTOR_SHIFT);

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = caps[i];
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			__set_bit(i, active);
			break;
		}

		/*
		 * Consider a zone as active if we can allow any number of
		 * active zones.
		 */
		if (!device->zone_info->max_active_zones)
			__set_bit(i, active);
	}

	if (num_sequential > 0)
		cache->seq_zone = true;

	if (num_conventional > 0) {
		/*
		 * Avoid calling calculate_alloc_pointer() for a new BG. It
		 * is of no use for a new BG, as the offset must always be 0.
		 *
		 * Also, we have a lock chain of extent buffer lock ->
		 * chunk mutex. For a new BG, this function is called from
		 * btrfs_make_block_group() which is already taking the
		 * chunk mutex. Thus, we cannot call
		 * calculate_alloc_pointer() which takes extent buffer
		 * locks to avoid deadlock.
		 */

		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		if (new) {
			cache->alloc_offset = 0;
			goto out;
		}
		ret = calculate_alloc_pointer(cache, &last_alloc);
		if (ret || map->num_stripes == num_conventional) {
			if (!ret)
				cache->alloc_offset = last_alloc;
			else
				btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
					  cache->start);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical);
			ret = -EIO;
			goto out;
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = caps[0];
		cache->zone_is_active = test_bit(0, active);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

	if (cache->zone_is_active) {
		btrfs_get_block_group(cache);
		spin_lock(&fs_info->zone_active_bgs_lock);
		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
		spin_unlock(&fs_info->zone_active_bgs_lock);
	}

out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
			"zoned: invalid write pointer %llu in block group %llu",
			cache->alloc_offset, cache->start);
		ret = -EIO;
	}

	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret)
		cache->meta_write_pointer = cache->alloc_offset + cache->start;

	if (ret) {
		kfree(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(caps);
	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}
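
/*
 * Account the unusable space of a zoned block group: bytes that were
 * allocated but are no longer used (they cannot be reused until the zone is
 * reset), plus the slack between zone capacity and zone size. The remainder
 * up to the zone capacity is the free space.
 */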
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = (cache->alloc_offset - cache->used) +
		   (cache->length - cache->zone_capacity);
	free = cache->zone_capacity - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}
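
/*
 * On zoned filesystems a freed metadata buffer cannot be overwritten in
 * place. Re-dirty the extent buffer, zero it out and park it on the
 * transaction's releasing_ebs list so it is only released once the
 * transaction completes.
 */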
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	if (!btrfs_is_zoned(fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
	    !list_empty(&eb->release_list))
		return;

	set_extent_buffer_dirty(eb);
	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
			       eb->start + eb->len - 1, EXTENT_DIRTY);
	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

	spin_lock(&trans->releasing_ebs_lock);
	list_add_tail(&eb->release_list, &trans->releasing_ebs);
	spin_unlock(&trans->releasing_ebs_lock);
	atomic_inc(&eb->refs);
}

void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
	spin_lock(&trans->releasing_ebs_lock);
	while (!list_empty(&trans->releasing_ebs)) {
		struct extent_buffer *eb;

		eb = list_first_entry(&trans->releasing_ebs,
				      struct extent_buffer, release_list);
		list_del_init(&eb->release_list);
		free_extent_buffer(eb);
	}
	spin_unlock(&trans->releasing_ebs_lock);
}
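
/*
 * Decide whether a data write at @start must be submitted as a zone append:
 * only data inodes on a zoned filesystem whose block group is backed by
 * sequential zones use zone append.
 */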
bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!is_data_inode(&inode->vfs_inode))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = cache->seq_zone;
	btrfs_put_block_group(cache);

	return ret;
}
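
/*
 * Remember the physical location and block device a zone append bio was
 * actually written to, so the ordered extent can later be rewritten to the
 * matching logical address.
 */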
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio)
{
	struct btrfs_ordered_extent *ordered;
	const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

	if (bio_op(bio) != REQ_OP_ZONE_APPEND)
		return;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
	if (WARN_ON(!ordered))
		return;

	ordered->physical = physical;
	ordered->bdev = bio->bi_bdev;

	btrfs_put_ordered_extent(ordered);
}
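
/*
 * Map the physical location recorded at zone append completion back to a
 * logical address and update the ordered extent, its extent map and the
 * attached checksums accordingly.
 */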
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_ordered_sum *sum;
	u64 orig_logical = ordered->disk_bytenr;
	u64 *logical = NULL;
	int nr, stripe_len;

	/* Zoned devices should not have partitions. So, we can assume it is 0 */
	ASSERT(!bdev_is_partition(ordered->bdev));
	if (WARN_ON(!ordered->bdev))
		return;

	if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
				     ordered->physical, &logical, &nr,
				     &stripe_len)))
		goto out;

	WARN_ON(nr != 1);

	if (orig_logical == *logical)
		goto out;

	ordered->disk_bytenr = *logical;

	em_tree = &inode->extent_tree;
	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = *logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);

	list_for_each_entry(sum, &ordered->list, list) {
		if (*logical < orig_logical)
			sum->bytenr -= orig_logical - *logical;
		else
			sum->bytenr += *logical - orig_logical;
	}

out:
	kfree(logical);
}
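
/*
 * Check that a metadata extent buffer at eb->start matches the block group's
 * meta_write_pointer, advancing the pointer on success. The looked up block
 * group is cached in *cache_ret for the next call.
 */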
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = *cache_ret;

	if (cache && (eb->start < cache->start ||
		      cache->start + cache->length <= eb->start)) {
		btrfs_put_block_group(cache);
		cache = NULL;
		*cache_ret = NULL;
	}

	if (!cache)
		cache = btrfs_lookup_block_group(fs_info, eb->start);

	if (cache) {
		if (cache->meta_write_pointer != eb->start) {
			btrfs_put_block_group(cache);
			cache = NULL;
			ret = false;
		} else {
			cache->meta_write_pointer = eb->start + eb->len;
		}

		*cache_ret = cache;
	}

	return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) || !cache)
		return;

	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
	cache->meta_write_pointer = eb->start;
}
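
/* Zero-fill a physical range on a zoned device; only valid on sequential zones. */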
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}
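
/*
 * Read the zone report for the zone containing @logical by mapping the
 * logical address to a mirror's physical location and querying the first
 * working device.
 */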
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_bio *bbio = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &mapped_length, &bbio);
	if (ret || !bbio || mapped_length < PAGE_SIZE) {
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		return -EINVAL;

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bbio->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bbio->stripes[i].physical;
		struct btrfs_device *dev = bbio->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);

	return ret;
}

/*
 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros from @physical_pos up to the write pointer of the dev-replace
 * source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}
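
/*
 * Return the single device backing the chunk that covers
 * [@logical, @logical + @length). Only single-stripe (SINGLE profile) chunks
 * are expected here.
 */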
struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_device *device;
	struct extent_map *em;
	struct map_lookup *map;

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return ERR_CAST(em);

	map = em->map_lookup;
	/* We only support single profile for now */
	ASSERT(map->num_stripes == 1);
	device = map->stripes[0].dev;

	free_extent_map(em);

	return device;
}

/**
 * Activate block group and underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	bool ret;

	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;
	/* Currently support SINGLE profile only */
	ASSERT(map->num_stripes == 1);
	device = map->stripes[0].dev;
	physical = map->stripes[0].physical;

	if (device->zone_info->max_active_zones == 0)
		return true;

	spin_lock(&block_group->lock);

	if (block_group->zone_is_active) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (block_group->alloc_offset == block_group->zone_capacity) {
		ret = false;
		goto out_unlock;
	}

	if (!btrfs_dev_set_active_zone(device, physical)) {
		/* Cannot activate the zone */
		ret = false;
		goto out_unlock;
	}

	/* Successfully activated all the zones */
	block_group->zone_is_active = 1;

	spin_unlock(&block_group->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(list_empty(&block_group->active_bg_list));
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	return ret;
}
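
/*
 * Finish the block group's zone: wait for outstanding writes, mark the block
 * group full and issue REQ_OP_ZONE_FINISH to the underlying device so its
 * active zone slot is released. Returns -EAGAIN if there is still reserved
 * or unwritten allocated space.
 */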
int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	map = block_group->physical_map;
	/* Currently support SINGLE profile only */
	ASSERT(map->num_stripes == 1);

	device = map->stripes[0].dev;
	physical = map->stripes[0].physical;

	if (device->zone_info->max_active_zones == 0)
		return 0;

	spin_lock(&block_group->lock);
	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if ((block_group->flags &
	     (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
	    block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}
	spin_unlock(&block_group->lock);

	ret = btrfs_inc_block_group_ro(block_group, false);
	if (ret)
		return ret;

	/* Ensure all writes in this block group finish */
	btrfs_wait_block_group_reservations(block_group);
	/* No need to wait for NOCOW writers. Zoned mode does not allow that. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
				 block_group->length);

	spin_lock(&block_group->lock);

	/*
	 * Bail out if someone already deactivated the block group, or
	 * allocated space is left in the block group.
	 */
	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		btrfs_dec_block_group_ro(block_group);
		return 0;
	}

	if (block_group->reserved) {
		spin_unlock(&block_group->lock);
		btrfs_dec_block_group_ro(block_group);
		return -EAGAIN;
	}

	block_group->zone_is_active = 0;
	block_group->alloc_offset = block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	spin_unlock(&block_group->lock);

	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
			       physical >> SECTOR_SHIFT,
			       device->zone_info->zone_size >> SECTOR_SHIFT,
			       GFP_NOFS);
	btrfs_dec_block_group_ro(block_group);

	if (!ret) {
		btrfs_dev_clear_active_zone(device, physical);

		spin_lock(&fs_info->zone_active_bgs_lock);
		ASSERT(!list_empty(&block_group->active_bg_list));
		list_del_init(&block_group->active_bg_list);
		spin_unlock(&fs_info->zone_active_bgs_lock);

		/* For active_bg_list */
		btrfs_put_block_group(block_group);
	}

	return ret;
}
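
/*
 * Check whether at least one device in @fs_devices still has an unused
 * active zone slot, so a new block group of the given RAID profile could be
 * activated.
 */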
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, int raid_index)
{
	struct btrfs_device *device;
	bool ret = false;

	if (!btrfs_is_zoned(fs_devices->fs_info))
		return true;

	/* Non-single profiles are not supported yet */
	if (raid_index != BTRFS_RAID_SINGLE)
		return false;

	/* Check if there is a device with active zones left */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_zoned_device_info *zinfo = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zinfo->max_active_zones ||
		    atomic_read(&zinfo->active_zones_left)) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}