ext4: reset retry counter when ext4_alloc_file_blocks() makes progress
Change the retry policy in ext4_alloc_file_blocks() to allow for a full retry cycle whenever a portion of an allocation request has been fulfilled. A large allocation request often results in multiple calls to ext4_map_blocks(), each of which is potentially subject to a temporary ENOSPC condition and retry cycle. The current code only allows for a single retry cycle.

This patch does not address a known bug or reported complaint. However, it should make block allocation for fallocate and zero range more robust.

In addition, simplify the conditional controlling the allocation while loop, where testing len alone is sufficient. Remove the assignment to ret2 in the error path after the call to ext4_map_blocks() since its value isn't subsequently used.

Signed-off-by: Eric Whitney <enwlinux@gmail.com>
Link: https://lore.kernel.org/r/20210113221403.18258-1-enwlinux@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
This commit is contained in:
parent
b5776e7524
commit
3258386aba
|
@ -4382,8 +4382,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
|
||||||
{
|
{
|
||||||
struct inode *inode = file_inode(file);
|
struct inode *inode = file_inode(file);
|
||||||
handle_t *handle;
|
handle_t *handle;
|
||||||
int ret = 0;
|
int ret, ret2 = 0, ret3 = 0;
|
||||||
int ret2 = 0, ret3 = 0;
|
|
||||||
int retries = 0;
|
int retries = 0;
|
||||||
int depth = 0;
|
int depth = 0;
|
||||||
struct ext4_map_blocks map;
|
struct ext4_map_blocks map;
|
||||||
|
@ -4408,7 +4407,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
|
||||||
depth = ext_depth(inode);
|
depth = ext_depth(inode);
|
||||||
|
|
||||||
retry:
|
retry:
|
||||||
while (ret >= 0 && len) {
|
while (len) {
|
||||||
/*
|
/*
|
||||||
* Recalculate credits when extent tree depth changes.
|
* Recalculate credits when extent tree depth changes.
|
||||||
*/
|
*/
|
||||||
|
@ -4430,9 +4429,13 @@ retry:
|
||||||
inode->i_ino, map.m_lblk,
|
inode->i_ino, map.m_lblk,
|
||||||
map.m_len, ret);
|
map.m_len, ret);
|
||||||
ext4_mark_inode_dirty(handle, inode);
|
ext4_mark_inode_dirty(handle, inode);
|
||||||
ret2 = ext4_journal_stop(handle);
|
ext4_journal_stop(handle);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
* allow a full retry cycle for any remaining allocations
|
||||||
|
*/
|
||||||
|
retries = 0;
|
||||||
map.m_lblk += ret;
|
map.m_lblk += ret;
|
||||||
map.m_len = len = len - ret;
|
map.m_len = len = len - ret;
|
||||||
epos = (loff_t)map.m_lblk << inode->i_blkbits;
|
epos = (loff_t)map.m_lblk << inode->i_blkbits;
|
||||||
|
@ -4450,11 +4453,8 @@ retry:
|
||||||
if (unlikely(ret2))
|
if (unlikely(ret2))
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (ret == -ENOSPC &&
|
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
|
||||||
ext4_should_retry_alloc(inode->i_sb, &retries)) {
|
|
||||||
ret = 0;
|
|
||||||
goto retry;
|
goto retry;
|
||||||
}
|
|
||||||
|
|
||||||
return ret > 0 ? ret2 : ret;
|
return ret > 0 ? ret2 : ret;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue