zonefs fixes for 5.19-rc2

* Fix handling of the explicit-open mount option, and in particular the
  conditions under which this option can be ignored.
* Fix a problem with zonefs iomap_begin method, causing a hang in
  iomap_readahead() when a readahead request reaches the end of a file.
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQSRPv8tYSvhwAzJdzjdoc3SxdoYdgUCYqMh/wAKCRDdoc3SxdoY
dvd+AP4jNRFhAedXl0mIutoP4k0XwblSz9RwrXLOYzkOtgpXGQD+Lps42w6EQliE
wWuuL4syVgKamolj0WGcPLarGZC7LQA=
=neot
-----END PGP SIGNATURE-----

Merge tag 'zonefs-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs

Pull zonefs fixes from Damien Le Moal:

 - Fix handling of the explicit-open mount option, and in particular
   the conditions under which this option can be ignored.

 - Fix a problem with zonefs iomap_begin method, causing a hang in
   iomap_readahead() when a readahead request reaches the end of a file.

* tag 'zonefs-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs:
  zonefs: fix zonefs_iomap_begin() for reads
  zonefs: Do not ignore explicit_open with active zone limit
  zonefs: fix handling of explicit_open option on mount
commit ad6e076498
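Note on the second fix listed above: for reads, the old zonefs_iomap_begin() clamped the mapping length with min(length, isize - offset), which becomes zero when a readahead request starts at the end of the file, and a zero-length mapping stalls iomap_readahead(). The new read path instead reports anything at or beyond EOF as a hole covering the requested length. The following is a minimal userspace sketch of that decision only, not the kernel code: the struct mapping type, demo_read_mapping() and main() are invented for illustration, while the real function fills a struct iomap under zi->i_truncate_mutex.

#include <stdio.h>

typedef long long loff64;

enum map_type { MAP_HOLE, MAP_MAPPED };

struct mapping {
	enum map_type type;
	loff64 offset;
	loff64 length;
};

static struct mapping demo_read_mapping(loff64 offset, loff64 length,
					loff64 isize, loff64 blocksize)
{
	struct mapping m;

	m.offset = offset - (offset % blocksize);	/* ALIGN_DOWN(offset, blocksize) */
	if (m.offset >= isize) {
		/* Read at or past EOF: report a hole covering the request. */
		m.type = MAP_HOLE;
		m.length = length;
	} else {
		/* Blocks below EOF are always mapped; stop the map at EOF. */
		m.type = MAP_MAPPED;
		m.length = isize - m.offset;
	}
	return m;
}

int main(void)
{
	/* A 128 KiB readahead starting exactly at EOF of a 1 MiB file. */
	struct mapping m = demo_read_mapping(1 << 20, 128 << 10, 1 << 20, 4096);

	/* With the old min(length, isize - offset) clamp this would have
	 * been a zero-length mapping; here it is a 128 KiB hole. */
	printf("type=%s offset=%lld length=%lld\n",
	       m.type == MAP_HOLE ? "hole" : "mapped", m.offset, m.length);
	return 0;
}

Built with any C compiler, the sketch prints a 128 KiB hole for a request that the pre-fix clamping would have turned into a zero-length mapping.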
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -110,15 +110,51 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
 	}
 }
 
-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
-			      unsigned int flags, struct iomap *iomap,
-			      struct iomap *srcmap)
+static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+				   loff_t length, unsigned int flags,
+				   struct iomap *iomap, struct iomap *srcmap)
+{
+	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	loff_t isize;
+
+	/*
+	 * All blocks are always mapped below EOF. If reading past EOF,
+	 * act as if there is a hole up to the file maximum size.
+	 */
+	mutex_lock(&zi->i_truncate_mutex);
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+	isize = i_size_read(inode);
+	if (iomap->offset >= isize) {
+		iomap->type = IOMAP_HOLE;
+		iomap->addr = IOMAP_NULL_ADDR;
+		iomap->length = length;
+	} else {
+		iomap->type = IOMAP_MAPPED;
+		iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+		iomap->length = isize - iomap->offset;
+	}
+	mutex_unlock(&zi->i_truncate_mutex);
+
+	trace_zonefs_iomap_begin(inode, iomap);
+
+	return 0;
+}
+
+static const struct iomap_ops zonefs_read_iomap_ops = {
+	.iomap_begin	= zonefs_read_iomap_begin,
+};
+
+static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+				    loff_t length, unsigned int flags,
+				    struct iomap *iomap, struct iomap *srcmap)
 {
 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	loff_t isize;
 
-	/* All I/Os should always be within the file maximum size */
+	/* All write I/Os should always be within the file maximum size */
 	if (WARN_ON_ONCE(offset + length > zi->i_max_size))
 		return -EIO;
 
@@ -128,7 +164,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	 * operation.
 	 */
 	if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
-			 (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
+			 !(flags & IOMAP_DIRECT)))
 		return -EIO;
 
 	/*
@@ -137,46 +173,43 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	 * write pointer) and unwriten beyond.
 	 */
 	mutex_lock(&zi->i_truncate_mutex);
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
 	isize = i_size_read(inode);
-	if (offset >= isize)
+	if (iomap->offset >= isize) {
 		iomap->type = IOMAP_UNWRITTEN;
-	else
-		iomap->type = IOMAP_MAPPED;
-	if (flags & IOMAP_WRITE)
-		length = zi->i_max_size - offset;
-	else
-		length = min(length, isize - offset);
+		iomap->length = zi->i_max_size - iomap->offset;
+	} else {
+		iomap->type = IOMAP_MAPPED;
+		iomap->length = isize - iomap->offset;
+	}
 	mutex_unlock(&zi->i_truncate_mutex);
 
-	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
-	iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
-	iomap->bdev = inode->i_sb->s_bdev;
-	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
-
 	trace_zonefs_iomap_begin(inode, iomap);
 
 	return 0;
 }
 
-static const struct iomap_ops zonefs_iomap_ops = {
-	.iomap_begin	= zonefs_iomap_begin,
+static const struct iomap_ops zonefs_write_iomap_ops = {
+	.iomap_begin	= zonefs_write_iomap_begin,
 };
 
 static int zonefs_read_folio(struct file *unused, struct folio *folio)
 {
-	return iomap_read_folio(folio, &zonefs_iomap_ops);
+	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
 }
 
 static void zonefs_readahead(struct readahead_control *rac)
 {
-	iomap_readahead(rac, &zonefs_iomap_ops);
+	iomap_readahead(rac, &zonefs_read_iomap_ops);
 }
 
 /*
  * Map blocks for page writeback. This is used only on conventional zone files,
  * which implies that the page range can only be within the fixed inode size.
  */
-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
-			     struct inode *inode, loff_t offset)
+static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+				   struct inode *inode, loff_t offset)
 {
 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
 
@@ -191,12 +224,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
 	    offset < wpc->iomap.offset + wpc->iomap.length)
 		return 0;
 
-	return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
-				  IOMAP_WRITE, &wpc->iomap, NULL);
+	return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
+					IOMAP_WRITE, &wpc->iomap, NULL);
 }
 
 static const struct iomap_writeback_ops zonefs_writeback_ops = {
-	.map_blocks		= zonefs_map_blocks,
+	.map_blocks		= zonefs_write_map_blocks,
 };
 
 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
@@ -226,7 +259,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
 		return -EINVAL;
 	}
 
-	return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+	return iomap_swapfile_activate(sis, swap_file, span,
+				       &zonefs_read_iomap_ops);
 }
 
 static const struct address_space_operations zonefs_file_aops = {
@@ -647,7 +681,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
 
 	/* Serialize against truncates */
 	filemap_invalidate_lock_shared(inode->i_mapping);
-	ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
+	ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
 	filemap_invalidate_unlock_shared(inode->i_mapping);
 
 	sb_end_pagefault(inode->i_sb);
@@ -899,7 +933,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 	if (append)
 		ret = zonefs_file_dio_append(iocb, from);
 	else
-		ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
 				   &zonefs_write_dio_ops, 0, NULL, 0);
 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
 	    (ret > 0 || ret == -EIOCBQUEUED)) {
@@ -948,7 +982,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
 	if (ret <= 0)
 		goto inode_unlock;
 
-	ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
+	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
 	if (ret > 0)
 		iocb->ki_pos += ret;
 	else if (ret == -EIO)
@@ -1041,7 +1075,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			goto inode_unlock;
 		}
 		file_accessed(iocb->ki_filp);
-		ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
+		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
 				   &zonefs_read_dio_ops, 0, NULL, 0);
 	} else {
 		ret = generic_file_read_iter(iocb, to);
@@ -1085,7 +1119,8 @@ static int zonefs_seq_file_write_open(struct inode *inode)
 
 		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
 
-			if (wro > sbi->s_max_wro_seq_files) {
+			if (sbi->s_max_wro_seq_files
+			    && wro > sbi->s_max_wro_seq_files) {
 				atomic_dec(&sbi->s_wro_seq_files);
 				ret = -EBUSY;
 				goto unlock;
@@ -1760,12 +1795,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
 
 	atomic_set(&sbi->s_wro_seq_files, 0);
 	sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev);
-	if (!sbi->s_max_wro_seq_files &&
-	    sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
-		zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n");
-		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
-	}
-
 	atomic_set(&sbi->s_active_seq_files, 0);
 	sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev);
 
@@ -1790,6 +1819,14 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
 	zonefs_info(sb, "Mounting %u zones",
 		    blkdev_nr_zones(sb->s_bdev->bd_disk));
 
+	if (!sbi->s_max_wro_seq_files &&
+	    !sbi->s_max_active_seq_files &&
+	    sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+		zonefs_info(sb,
+			"No open and active zone limits. Ignoring explicit_open mount option\n");
+		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
+	}
+
 	/* Create root directory inode */
 	ret = -ENOMEM;
 	inode = new_inode(sb);
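Note on the explicit_open hunks above: the open-time check now applies only when the device actually reports an open-zone limit, and at mount time explicit_open is ignored only when the device reports neither an open-zone nor an active-zone limit. A rough userspace sketch of that policy follows; the zone_limits struct and the helper names are made up for illustration, standing in for what zonefs_fill_super() and zonefs_seq_file_write_open() do with bdev_max_open_zones() and bdev_max_active_zones().

#include <stdbool.h>
#include <stdio.h>

struct zone_limits {
	unsigned int max_open;	  /* from bdev_max_open_zones(), 0 = no limit   */
	unsigned int max_active;  /* from bdev_max_active_zones(), 0 = no limit */
};

/* Mount time: keep explicit_open unless the device reports no limit at all. */
static bool keep_explicit_open(const struct zone_limits *l)
{
	return l->max_open || l->max_active;
}

/*
 * Open time: a new writable sequential file is refused (-EBUSY in zonefs)
 * only when an open-zone limit exists and this open would exceed it.
 */
static bool may_open_for_write(const struct zone_limits *l, unsigned int wro)
{
	return !l->max_open || wro <= l->max_open;
}

int main(void)
{
	struct zone_limits l = { .max_open = 0, .max_active = 8 };

	/* explicit_open stays in effect because an active-zone limit exists... */
	printf("keep explicit_open: %d\n", keep_explicit_open(&l));
	/* ...while the open-zone check alone never rejects an open here. */
	printf("open nr 11 allowed: %d\n", may_open_for_write(&l, 11));
	return 0;
}

With max_open = 0 and max_active = 8, explicit_open is kept but the open-zone limit can never return -EBUSY on its own, which matches the case the "Do not ignore explicit_open with active zone limit" change addresses.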