Changes since last update:

 - fix fsdax partition offset misbehavior;
 - clean up z_erofs_decompressqueue_work() declaration;
 - fix up EOF lcluster inlining, especially for small compressed data.
-----BEGIN PGP SIGNATURE-----

iIcEABYIAC8WIQThPAmQN9sSA0DVxtI5NzHcH7XmBAUCYf0fBhEceGlhbmdAa2Vy
bmVsLm9yZwAKCRA5NzHcH7XmBCuaAP9GtcEDQ38ozhTgiD7ae8mPX808my3WDs+0
lyySofcEeAD+IxKNv0Gx7j7TgeaFWQZc7r16JeOPI9TKAe4BZaa1Tg0=
=J/Hq
-----END PGP SIGNATURE-----

Merge tag 'erofs-for-5.17-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs fixes from Gao Xiang:
 "Two fixes related to fsdax cleanup in this cycle and ztailpacking to
  fix small compressed data inlining. There is also a trivial cleanup
  to rearrange code for better reading.

  Summary:

   - fix fsdax partition offset misbehavior

   - clean up z_erofs_decompressqueue_work() declaration

   - fix up EOF lcluster inlining, especially for small compressed data"

* tag 'erofs-for-5.17-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix small compressed files inlining
  erofs: avoid unnecessary z_erofs_decompressqueue_work() declaration
  erofs: fix fsdax partition offset handling
commit b0bc0cb815
fs/erofs/data.c

@@ -252,12 +252,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		return ret;
 
 	iomap->offset = map.m_la;
-	if (flags & IOMAP_DAX) {
+	if (flags & IOMAP_DAX)
 		iomap->dax_dev = mdev.m_daxdev;
-		iomap->offset += mdev.m_dax_part_off;
-	} else {
+	else
 		iomap->bdev = mdev.m_bdev;
-	}
 	iomap->length = map.m_llen;
 	iomap->flags = 0;
 	iomap->private = NULL;
@@ -284,6 +282,8 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	} else {
 		iomap->type = IOMAP_MAPPED;
 		iomap->addr = mdev.m_pa;
+		if (flags & IOMAP_DAX)
+			iomap->addr += mdev.m_dax_part_off;
 	}
 	return 0;
 }
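For readers outside the iomap code, the shape of this fix: iomap->offset is the file-logical position of the extent, while iomap->addr is the on-device block address, so m_dax_part_off (the partition start relative to the whole DAX device) must be applied to the address, never the offset. Below is a minimal user-space sketch of that arithmetic; the struct and field names are hypothetical stand-ins modeled on the kernel ones, not the actual erofs types.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel-side mapping structures. */
struct fs_map {
	uint64_t m_la;           /* logical (file) offset of the extent */
	uint64_t m_pa;           /* physical address within the partition */
	uint64_t m_dax_part_off; /* partition start relative to the DAX device */
};

struct io_map {
	uint64_t offset;         /* file-relative: callers compare it to pos */
	uint64_t addr;           /* device-absolute: locates the data on DAX */
};

/*
 * The bug added m_dax_part_off to 'offset', silently shifting the file
 * position; the fix adds it to 'addr', translating the partition-relative
 * block address into a whole-device address.
 */
static void fill_dax_map(struct io_map *io, const struct fs_map *m)
{
	io->offset = m->m_la;
	io->addr = m->m_pa + m->m_dax_part_off;
}

int main(void)
{
	struct fs_map m = { .m_la = 4096, .m_pa = 8192, .m_dax_part_off = 1048576 };
	struct io_map io;

	fill_dax_map(&io, &m);
	printf("offset=%llu addr=%llu\n",
	       (unsigned long long)io.offset, (unsigned long long)io.addr);
	return 0;
}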
fs/erofs/zdata.c
@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
 	return false;
 }
 
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
-				       bool sync, int bios)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
-	/* wake up the caller thread for sync decompression */
-	if (sync) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&io->u.wait.lock, flags);
-		if (!atomic_add_return(bios, &io->pending_bios))
-			wake_up_locked(&io->u.wait);
-		spin_unlock_irqrestore(&io->u.wait.lock, flags);
-		return;
-	}
-
-	if (atomic_add_return(bios, &io->pending_bios))
-		return;
-	/* Use workqueue and sync decompression for atomic contexts only */
-	if (in_atomic() || irqs_disabled()) {
-		queue_work(z_erofs_workqueue, &io->u.work);
-		/* enable sync decompression for readahead */
-		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
-			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
-		return;
-	}
-	z_erofs_decompressqueue_work(&io->u.work);
-}
-
 static bool z_erofs_page_is_invalidated(struct page *page)
 {
 	return !page->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
-	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
-	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
-	blk_status_t err = bio->bi_status;
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
-
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct page *page = bvec->bv_page;
-
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(z_erofs_page_is_invalidated(page));
-
-		if (err)
-			SetPageError(page);
-
-		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
-			if (!err)
-				SetPageUptodate(page);
-			unlock_page(page);
-		}
-	}
-	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
-	bio_put(bio);
-}
-
 static int z_erofs_decompress_pcluster(struct super_block *sb,
 				       struct z_erofs_pcluster *pcl,
 				       struct page **pagepool)
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
 	kvfree(bgq);
 }
 
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+				       bool sync, int bios)
+{
+	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+	/* wake up the caller thread for sync decompression */
+	if (sync) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&io->u.wait.lock, flags);
+		if (!atomic_add_return(bios, &io->pending_bios))
+			wake_up_locked(&io->u.wait);
+		spin_unlock_irqrestore(&io->u.wait.lock, flags);
+		return;
+	}
+
+	if (atomic_add_return(bios, &io->pending_bios))
+		return;
+	/* Use workqueue and sync decompression for atomic contexts only */
+	if (in_atomic() || irqs_disabled()) {
+		queue_work(z_erofs_workqueue, &io->u.work);
+		/* enable sync decompression for readahead */
+		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+		return;
+	}
+	z_erofs_decompressqueue_work(&io->u.work);
+}
+
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       unsigned int nr,
 					       struct page **pagepool,
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+	blk_status_t err = bio->bi_status;
+	struct bio_vec *bvec;
+	struct bvec_iter_all iter_all;
+
+	bio_for_each_segment_all(bvec, bio, iter_all) {
+		struct page *page = bvec->bv_page;
+
+		DBG_BUGON(PageUptodate(page));
+		DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+		if (err)
+			SetPageError(page);
+
+		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+			if (!err)
+				SetPageUptodate(page);
+			unlock_page(page);
+		}
+	}
+	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+	bio_put(bio);
+}
+
 static void z_erofs_submit_queue(struct super_block *sb,
 				 struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,
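The zdata.c hunks above are pure code motion: z_erofs_decompress_kickoff() and z_erofs_decompressqueue_endio() move below z_erofs_decompressqueue_work(), so the forward declaration of the latter can be dropped with no behavioral change. A toy C sketch of that general cleanup pattern (illustrative names, not the erofs code):

/* Before: the caller precedes the callee, so a forward declaration
 * must be kept in sync with the definition. */
static void process_queue(void *q);              /* forward declaration */

static void kickoff_before(void *q)
{
	process_queue(q);                        /* call via the declaration */
}

static void process_queue(void *q)
{
	(void)q;                                 /* drain the queue here */
}

/* After: defining the callee first makes any declaration unnecessary. */
static void drain_queue(void *q)
{
	(void)q;                                 /* drain the queue here */
}

static void kickoff_after(void *q)
{
	drain_queue(q);
}

int main(void)
{
	kickoff_before(0);
	kickoff_after(0);
	return 0;
}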
fs/erofs/zmap.c
@@ -630,6 +630,13 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 		if (endoff >= m.clusterofs) {
 			m.headtype = m.type;
 			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+			/*
+			 * For ztailpacking files, in order to inline data more
+			 * effectively, special EOF lclusters are now supported
+			 * which can have three parts at most.
+			 */
+			if (ztailpacking && end > inode->i_size)
+				end = inode->i_size;
 			break;
 		}
 		/* m.lcn should be >= 1 if endoff < m.clusterofs */
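The fix clamps the extent end to the file size whenever a ztailpacking (inline EOF lcluster) mapping would run past it, which is what lets very small compressed files inline correctly. A minimal stand-alone sketch of that clamp, with hypothetical parameter names rather than the kernel's:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * For an inline EOF lcluster, the mapped extent must stop at the file
 * size, or the inline tail of a small compressed file would be
 * over-extended past EOF.
 */
static uint64_t clamp_eof_extent(uint64_t end, uint64_t i_size, bool ztailpacking)
{
	if (ztailpacking && end > i_size)
		end = i_size;
	return end;
}

int main(void)
{
	/* A 100-byte file whose EOF lcluster would otherwise map 4096 bytes. */
	assert(clamp_eof_extent(4096, 100, true) == 100);
	assert(clamp_eof_extent(4096, 100, false) == 4096);
	return 0;
}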