btrfs: handle errors properly inside btrfs_submit_compressed_write()
Just like btrfs_submit_compressed_read(), there are quite a few BUG_ON()s inside btrfs_submit_compressed_write() for the bio submission path. Fix them using the same method: - For the last bio, just end the bio. In that case, the endio function of one of all the submitted bios will be able to free the compressed_bio. - For a half-submitted bio, wait and finish the compressed_bio manually. In this case, once all the other bios finish, we are the only one referring to the compressed_bio, and can finish it manually. Signed-off-by: Qu Wenruo <wqu@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
86ccbb4d2a
commit
6853c64a6e
|
@ -366,50 +366,55 @@ static noinline void end_compressed_writeback(struct inode *inode,
|
|||
/* the inode may be gone now */
|
||||
}
|
||||
|
||||
/*
|
||||
* do the cleanup once all the compressed pages hit the disk.
|
||||
* This will clear writeback on the file pages and free the compressed
|
||||
* pages.
|
||||
*
|
||||
* This also calls the writeback end hooks for the file pages so that
|
||||
* metadata and checksums can be updated in the file.
|
||||
*/
|
||||
static void end_compressed_bio_write(struct bio *bio)
|
||||
static void finish_compressed_bio_write(struct compressed_bio *cb)
|
||||
{
|
||||
struct compressed_bio *cb = bio->bi_private;
|
||||
struct inode *inode;
|
||||
struct page *page;
|
||||
struct inode *inode = cb->inode;
|
||||
unsigned int index;
|
||||
|
||||
if (!dec_and_test_compressed_bio(cb, bio))
|
||||
goto out;
|
||||
|
||||
/* ok, we're the last bio for this extent, step one is to
|
||||
* call back into the FS and do all the end_io operations
|
||||
/*
|
||||
* Ok, we're the last bio for this extent, step one is to call back
|
||||
* into the FS and do all the end_io operations.
|
||||
*/
|
||||
inode = cb->inode;
|
||||
btrfs_record_physical_zoned(inode, cb->start, bio);
|
||||
btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
|
||||
cb->start, cb->start + cb->len - 1,
|
||||
!cb->errors);
|
||||
|
||||
end_compressed_writeback(inode, cb);
|
||||
/* note, our inode could be gone now */
|
||||
/* Note, our inode could be gone now */
|
||||
|
||||
/*
|
||||
* release the compressed pages, these came from alloc_page and
|
||||
* Release the compressed pages, these came from alloc_page and
|
||||
* are not attached to the inode at all
|
||||
*/
|
||||
index = 0;
|
||||
for (index = 0; index < cb->nr_pages; index++) {
|
||||
page = cb->compressed_pages[index];
|
||||
struct page *page = cb->compressed_pages[index];
|
||||
|
||||
page->mapping = NULL;
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
/* finally free the cb struct */
|
||||
/* Finally free the cb struct */
|
||||
kfree(cb->compressed_pages);
|
||||
kfree(cb);
|
||||
}
|
||||
|
||||
/*
|
||||
* Do the cleanup once all the compressed pages hit the disk. This will clear
|
||||
* writeback on the file pages and free the compressed pages.
|
||||
*
|
||||
* This also calls the writeback end hooks for the file pages so that metadata
|
||||
* and checksums can be updated in the file.
|
||||
*/
|
||||
static void end_compressed_bio_write(struct bio *bio)
|
||||
{
|
||||
struct compressed_bio *cb = bio->bi_private;
|
||||
|
||||
if (!dec_and_test_compressed_bio(cb, bio))
|
||||
goto out;
|
||||
|
||||
btrfs_record_physical_zoned(cb->inode, cb->start, bio);
|
||||
|
||||
finish_compressed_bio_write(cb);
|
||||
out:
|
||||
bio_put(bio);
|
||||
}
|
||||
|
@ -512,18 +517,18 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
|
|||
atomic_inc(&cb->pending_bios);
|
||||
ret = btrfs_bio_wq_end_io(fs_info, bio,
|
||||
BTRFS_WQ_ENDIO_DATA);
|
||||
BUG_ON(ret); /* -ENOMEM */
|
||||
if (ret)
|
||||
goto finish_cb;
|
||||
|
||||
if (!skip_sum) {
|
||||
ret = btrfs_csum_one_bio(inode, bio, start, 1);
|
||||
BUG_ON(ret); /* -ENOMEM */
|
||||
if (ret)
|
||||
goto finish_cb;
|
||||
}
|
||||
|
||||
ret = btrfs_map_bio(fs_info, bio, 0);
|
||||
if (ret) {
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
}
|
||||
if (ret)
|
||||
goto finish_cb;
|
||||
|
||||
bio = btrfs_bio_alloc(BIO_MAX_VECS);
|
||||
bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
|
||||
|
@ -550,23 +555,44 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
|
|||
|
||||
atomic_inc(&cb->pending_bios);
|
||||
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
|
||||
BUG_ON(ret); /* -ENOMEM */
|
||||
if (ret)
|
||||
goto last_bio;
|
||||
|
||||
if (!skip_sum) {
|
||||
ret = btrfs_csum_one_bio(inode, bio, start, 1);
|
||||
BUG_ON(ret); /* -ENOMEM */
|
||||
if (ret)
|
||||
goto last_bio;
|
||||
}
|
||||
|
||||
ret = btrfs_map_bio(fs_info, bio, 0);
|
||||
if (ret) {
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
}
|
||||
if (ret)
|
||||
goto last_bio;
|
||||
|
||||
if (blkcg_css)
|
||||
kthread_associate_blkcg(NULL);
|
||||
|
||||
return 0;
|
||||
last_bio:
|
||||
bio->bi_status = ret;
|
||||
/* One of the bios' endio function will free @cb. */
|
||||
bio_endio(bio);
|
||||
return ret;
|
||||
|
||||
finish_cb:
|
||||
if (bio) {
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
}
|
||||
|
||||
wait_var_event(cb, atomic_read(&cb->pending_bios) == 0);
|
||||
/*
|
||||
* Even with previous bio ended, we should still have io not yet
|
||||
* submitted, thus need to finish manually.
|
||||
*/
|
||||
ASSERT(refcount_read(&cb->pending_sectors));
|
||||
/* Now we are the only one referring @cb, can finish it safely. */
|
||||
finish_compressed_bio_write(cb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u64 bio_end_offset(struct bio *bio)
|
||||
|
|
Loading…
Reference in New Issue