Changes since last update:

- Add sub-page block size support for uncompressed files;
- Support flattened block device for multi-blob images to be attached into
  virtual machines (including cloud servers) and bare metals;
- Support long xattr name prefixes to optimize images with common xattr
  namespaces (e.g. files with overlayfs xattrs) use cases;
- Various minor cleanups & fixes.
-----BEGIN PGP SIGNATURE-----

iIcEABYIAC8WIQThPAmQN9sSA0DVxtI5NzHcH7XmBAUCZETCNREceGlhbmdAa2Vy
bmVsLm9yZwAKCRA5NzHcH7XmBJCMAP9VkAPycbbqa6qWUASdyh/HGyuLJTHSfmsJ
zO4y6hBgOwD9GXg55sY8ycvcOx9ayaUt5V5f9zhs4wdGcoPhj5fWzgA=
=nUva
-----END PGP SIGNATURE-----

Merge tag 'erofs-for-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "In this cycle, sub-page block support for uncompressed files is
  available. It's mainly used to enable original signing ('golden')
  4k-block images on arm64 with 16/64k pages. In addition, end users
  could also use this feature to build a manifest to directly refer to
  golden tar data.

  Besides, long xattr name prefix support is also introduced in this
  cycle to avoid too many xattrs with the same prefix (e.g. overlayfs
  xattrs). It's useful for the erofs + overlayfs combination (like the
  Composefs model): the image size is reduced by ~14% and runtime
  performance is also slightly improved.

  Others are random fixes and cleanups as usual.

  Summary:

   - Add sub-page block size support for uncompressed files

   - Support flattened block device for multi-blob images to be attached
     into virtual machines (including cloud servers) and bare metals

   - Support long xattr name prefixes to optimize images with common
     xattr namespaces (e.g. files with overlayfs xattrs) use cases

   - Various minor cleanups & fixes"

* tag 'erofs-for-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: cleanup i_format-related stuffs
  erofs: sunset erofs_dbg()
  erofs: fix potential overflow calculating xattr_isize
  erofs: get rid of z_erofs_fill_inode()
  erofs: enable long extended attribute name prefixes
  erofs: handle long xattr name prefixes properly
  erofs: add helpers to load long xattr name prefixes
  erofs: introduce on-disk format for long xattr name prefixes
  erofs: move packed inode out of the compression part
  erofs: keep meta inode into erofs_buf
  erofs: initialize packed inode after root inode is assigned
  erofs: stop parsing non-compact HEAD index if clusterofs is invalid
  erofs: don't warn ztailpacking feature anymore
  erofs: simplify erofs_xattr_generic_get()
  erofs: rename init_inode_xattrs with erofs_ prefix
  erofs: move several xattr helpers into xattr.c
  erofs: tidy up EROFS on-disk naming
  erofs: support flattened block device for multi-blob images
  erofs: set block size to the on-disk block size
  erofs: avoid hardcoded blocksize for subpage block support
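A rough user-space sketch of the idea behind the sub-page block size support
follows (not the kernel code itself: the helpers mirror the new
erofs_blknr()/erofs_blkoff()/erofs_pos() macros visible in the diff below, but
take the block-size shift as an explicit parameter instead of a
struct super_block):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t erofs_off_t;
typedef uint32_t erofs_blk_t;

/* block number containing byte address "addr" */
static erofs_blk_t erofs_blknr(unsigned int blkszbits, erofs_off_t addr)
{
	return addr >> blkszbits;
}

/* byte offset of "addr" within its block */
static unsigned int erofs_blkoff(unsigned int blkszbits, erofs_off_t addr)
{
	return addr & ((1U << blkszbits) - 1);
}

/* byte position of the start of block "blk" */
static erofs_off_t erofs_pos(unsigned int blkszbits, erofs_blk_t blk)
{
	return (erofs_off_t)blk << blkszbits;
}

int main(void)
{
	/* the shift comes from the on-disk superblock, not from PAGE_SHIFT,
	 * so a 4KiB-block image also works with 16KiB/64KiB pages */
	unsigned int blkszbits = 12;		/* 4KiB blocks */
	erofs_off_t addr = 0x12345;		/* arbitrary byte address */
	erofs_blk_t blk = erofs_blknr(blkszbits, addr);

	printf("addr %llu -> block %u, in-block offset %u, block start %llu\n",
	       (unsigned long long)addr, blk, erofs_blkoff(blkszbits, addr),
	       (unsigned long long)erofs_pos(blkszbits, blk));
	return 0;
}

With blkszbits read from the superblock, a signed 4KiB-block ("golden") image
keeps working on an arm64 kernel built with larger pages, which is exactly the
use case called out in the pull message.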
commit 61d325dcbc
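The other headline on-disk change is the long xattr name prefix table. The
standalone sketch below shows how an entry whose e_name_index has bit 7
(EROFS_XATTR_LONG_PREFIX) set expands back into a full attribute name from a
base short-prefix index plus an infix, as in struct erofs_xattr_long_prefix
introduced in the diff; the prefix table contents and the "trusted.overlay."
example are illustrative values, not data read from a real image.

#include <stdio.h>

/* a few short (built-in) prefixes; indexes follow EROFS_XATTR_INDEX_* */
static const char *short_prefixes[] = {
	[1] = "user.",		/* EROFS_XATTR_INDEX_USER */
	[4] = "trusted.",	/* EROFS_XATTR_INDEX_TRUSTED */
	[6] = "security.",	/* EROFS_XATTR_INDEX_SECURITY */
};

/* long prefix = one short prefix + an infix string */
struct long_prefix {
	unsigned char base_index;
	const char *infix;
};

#define EROFS_XATTR_LONG_PREFIX		0x80
#define EROFS_XATTR_LONG_PREFIX_MASK	0x7f

static void print_full_name(unsigned char e_name_index, const char *e_name,
			    const struct long_prefix *tbl)
{
	if (e_name_index & EROFS_XATTR_LONG_PREFIX) {
		/* the low 7 bits select an entry in the long prefix table */
		const struct long_prefix *p =
			&tbl[e_name_index & EROFS_XATTR_LONG_PREFIX_MASK];

		printf("%s%s%s\n", short_prefixes[p->base_index],
		       p->infix, e_name);
	} else {
		printf("%s%s\n", short_prefixes[e_name_index], e_name);
	}
}

int main(void)
{
	/* one long prefix: base "trusted." (index 4) + infix "overlay." */
	struct long_prefix tbl[] = { { .base_index = 4, .infix = "overlay." } };

	/* the stored xattr entry only keeps the suffix, e.g. "redirect" */
	print_full_name(EROFS_XATTR_LONG_PREFIX | 0, "redirect", tbl);
	return 0;
}

Storing only the suffix per entry is what shrinks overlayfs-heavy images: the
shared "trusted.overlay." part lives once in the prefix table instead of being
repeated in every xattr entry.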
@@ -40,8 +40,8 @@ Here are the main features of EROFS:
 - Support multiple devices to refer to external blobs, which can be used
   for container images;
 
-- 4KiB block size and 32-bit block addresses for each device, therefore
-  16TiB address space at most for now;
+- 32-bit block addresses for each device, therefore 16TiB address space at
+  most with 4KiB block size for now;
 
 - Two inode layouts for different requirements:
 
|
@ -27,11 +27,15 @@ void erofs_put_metabuf(struct erofs_buf *buf)
|
|||
buf->page = NULL;
|
||||
}
|
||||
|
||||
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
|
||||
erofs_blk_t blkaddr, enum erofs_kmap_type type)
|
||||
/*
|
||||
* Derive the block size from inode->i_blkbits to make compatible with
|
||||
* anonymous inode in fscache mode.
|
||||
*/
|
||||
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
|
||||
enum erofs_kmap_type type)
|
||||
{
|
||||
struct address_space *const mapping = inode->i_mapping;
|
||||
erofs_off_t offset = blknr_to_addr(blkaddr);
|
||||
struct inode *inode = buf->inode;
|
||||
erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
|
||||
pgoff_t index = offset >> PAGE_SHIFT;
|
||||
struct page *page = buf->page;
|
||||
struct folio *folio;
|
||||
|
@ -41,7 +45,7 @@ void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
|
|||
erofs_put_metabuf(buf);
|
||||
|
||||
nofs_flag = memalloc_nofs_save();
|
||||
folio = read_cache_folio(mapping, index, NULL, NULL);
|
||||
folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
|
||||
memalloc_nofs_restore(nofs_flag);
|
||||
if (IS_ERR(folio))
|
||||
return folio;
|
||||
|
@ -63,14 +67,19 @@ void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
|
|||
return buf->base + (offset & ~PAGE_MASK);
|
||||
}
|
||||
|
||||
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
|
||||
{
|
||||
if (erofs_is_fscache_mode(sb))
|
||||
buf->inode = EROFS_SB(sb)->s_fscache->inode;
|
||||
else
|
||||
buf->inode = sb->s_bdev->bd_inode;
|
||||
}
|
||||
|
||||
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
|
||||
erofs_blk_t blkaddr, enum erofs_kmap_type type)
|
||||
{
|
||||
if (erofs_is_fscache_mode(sb))
|
||||
return erofs_bread(buf, EROFS_SB(sb)->s_fscache->inode,
|
||||
blkaddr, type);
|
||||
|
||||
return erofs_bread(buf, sb->s_bdev->bd_inode, blkaddr, type);
|
||||
erofs_init_metabuf(buf, sb);
|
||||
return erofs_bread(buf, blkaddr, type);
|
||||
}
|
||||
|
||||
static int erofs_map_blocks_flatmode(struct inode *inode,
|
||||
|
@ -79,33 +88,32 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
|
|||
erofs_blk_t nblocks, lastblk;
|
||||
u64 offset = map->m_la;
|
||||
struct erofs_inode *vi = EROFS_I(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
|
||||
|
||||
nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
|
||||
nblocks = erofs_iblks(inode);
|
||||
lastblk = nblocks - tailendpacking;
|
||||
|
||||
/* there is no hole in flatmode */
|
||||
map->m_flags = EROFS_MAP_MAPPED;
|
||||
if (offset < blknr_to_addr(lastblk)) {
|
||||
map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
|
||||
map->m_plen = blknr_to_addr(lastblk) - offset;
|
||||
if (offset < erofs_pos(sb, lastblk)) {
|
||||
map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
|
||||
map->m_plen = erofs_pos(sb, lastblk) - offset;
|
||||
} else if (tailendpacking) {
|
||||
map->m_pa = erofs_iloc(inode) + vi->inode_isize +
|
||||
vi->xattr_isize + erofs_blkoff(offset);
|
||||
vi->xattr_isize + erofs_blkoff(sb, offset);
|
||||
map->m_plen = inode->i_size - offset;
|
||||
|
||||
/* inline data should be located in the same meta block */
|
||||
if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
|
||||
erofs_err(inode->i_sb,
|
||||
"inline data cross block boundary @ nid %llu",
|
||||
if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
|
||||
erofs_err(sb, "inline data cross block boundary @ nid %llu",
|
||||
vi->nid);
|
||||
DBG_BUGON(1);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
map->m_flags |= EROFS_MAP_META;
|
||||
} else {
|
||||
erofs_err(inode->i_sb,
|
||||
"internal error @ nid: %llu (size %llu), m_la 0x%llx",
|
||||
erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
|
||||
vi->nid, inode->i_size, map->m_la);
|
||||
DBG_BUGON(1);
|
||||
return -EIO;
|
||||
|
@ -148,29 +156,29 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
|
|||
pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
|
||||
vi->xattr_isize, unit) + unit * chunknr;
|
||||
|
||||
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
|
||||
kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
|
||||
if (IS_ERR(kaddr)) {
|
||||
err = PTR_ERR(kaddr);
|
||||
goto out;
|
||||
}
|
||||
map->m_la = chunknr << vi->chunkbits;
|
||||
map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
|
||||
roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
|
||||
round_up(inode->i_size - map->m_la, sb->s_blocksize));
|
||||
|
||||
/* handle block map */
|
||||
if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
|
||||
__le32 *blkaddr = kaddr + erofs_blkoff(pos);
|
||||
__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);
|
||||
|
||||
if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
|
||||
map->m_flags = 0;
|
||||
} else {
|
||||
map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
|
||||
map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
|
||||
map->m_flags = EROFS_MAP_MAPPED;
|
||||
}
|
||||
goto out_unlock;
|
||||
}
|
||||
/* parse chunk indexes */
|
||||
idx = kaddr + erofs_blkoff(pos);
|
||||
idx = kaddr + erofs_blkoff(sb, pos);
|
||||
switch (le32_to_cpu(idx->blkaddr)) {
|
||||
case EROFS_NULL_ADDR:
|
||||
map->m_flags = 0;
|
||||
|
@ -178,7 +186,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
|
|||
default:
|
||||
map->m_deviceid = le16_to_cpu(idx->device_id) &
|
||||
EROFS_SB(sb)->device_id_mask;
|
||||
map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
|
||||
map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
|
||||
map->m_flags = EROFS_MAP_MAPPED;
|
||||
break;
|
||||
}
|
||||
|
@ -197,7 +205,6 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
|
|||
struct erofs_device_info *dif;
|
||||
int id;
|
||||
|
||||
/* primary device by default */
|
||||
map->m_bdev = sb->s_bdev;
|
||||
map->m_daxdev = EROFS_SB(sb)->dax_dev;
|
||||
map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
|
||||
|
@ -210,20 +217,25 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
|
|||
up_read(&devs->rwsem);
|
||||
return -ENODEV;
|
||||
}
|
||||
if (devs->flatdev) {
|
||||
map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
|
||||
up_read(&devs->rwsem);
|
||||
return 0;
|
||||
}
|
||||
map->m_bdev = dif->bdev;
|
||||
map->m_daxdev = dif->dax_dev;
|
||||
map->m_dax_part_off = dif->dax_part_off;
|
||||
map->m_fscache = dif->fscache;
|
||||
up_read(&devs->rwsem);
|
||||
} else if (devs->extra_devices) {
|
||||
} else if (devs->extra_devices && !devs->flatdev) {
|
||||
down_read(&devs->rwsem);
|
||||
idr_for_each_entry(&devs->tree, dif, id) {
|
||||
erofs_off_t startoff, length;
|
||||
|
||||
if (!dif->mapped_blkaddr)
|
||||
continue;
|
||||
startoff = blknr_to_addr(dif->mapped_blkaddr);
|
||||
length = blknr_to_addr(dif->blocks);
|
||||
startoff = erofs_pos(sb, dif->mapped_blkaddr);
|
||||
length = erofs_pos(sb, dif->blocks);
|
||||
|
||||
if (map->m_pa >= startoff &&
|
||||
map->m_pa < startoff + length) {
|
||||
|
@ -244,6 +256,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
|
|||
unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
|
||||
{
|
||||
int ret;
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct erofs_map_blocks map;
|
||||
struct erofs_map_dev mdev;
|
||||
|
||||
|
@ -258,7 +271,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
|
|||
.m_deviceid = map.m_deviceid,
|
||||
.m_pa = map.m_pa,
|
||||
};
|
||||
ret = erofs_map_dev(inode->i_sb, &mdev);
|
||||
ret = erofs_map_dev(sb, &mdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -284,11 +297,11 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
|
|||
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
|
||||
|
||||
iomap->type = IOMAP_INLINE;
|
||||
ptr = erofs_read_metabuf(&buf, inode->i_sb,
|
||||
erofs_blknr(mdev.m_pa), EROFS_KMAP);
|
||||
ptr = erofs_read_metabuf(&buf, sb,
|
||||
erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
|
||||
if (IS_ERR(ptr))
|
||||
return PTR_ERR(ptr);
|
||||
iomap->inline_data = ptr + erofs_blkoff(mdev.m_pa);
|
||||
iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
|
||||
iomap->private = buf.base;
|
||||
} else {
|
||||
iomap->type = IOMAP_MAPPED;
|
||||
|
|
|
@ -42,7 +42,7 @@ int z_erofs_load_lz4_config(struct super_block *sb,
|
|||
if (!sbi->lz4.max_pclusterblks) {
|
||||
sbi->lz4.max_pclusterblks = 1; /* reserved case */
|
||||
} else if (sbi->lz4.max_pclusterblks >
|
||||
Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
|
||||
erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
|
||||
erofs_err(sb, "too large lz4 pclusterblks %u",
|
||||
sbi->lz4.max_pclusterblks);
|
||||
return -EINVAL;
|
||||
|
@ -221,13 +221,13 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
|
|||
support_0padding = true;
|
||||
ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
|
||||
min_t(unsigned int, rq->inputsize,
|
||||
EROFS_BLKSIZ - rq->pageofs_in));
|
||||
rq->sb->s_blocksize - rq->pageofs_in));
|
||||
if (ret) {
|
||||
kunmap_atomic(headpage);
|
||||
return ret;
|
||||
}
|
||||
may_inplace = !((rq->pageofs_in + rq->inputsize) &
|
||||
(EROFS_BLKSIZ - 1));
|
||||
(rq->sb->s_blocksize - 1));
|
||||
}
|
||||
|
||||
inputmargin = rq->pageofs_in;
|
||||
|
|
|
@ -166,8 +166,8 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
|
|||
/* 1. get the exact LZMA compressed size */
|
||||
kin = kmap(*rq->in);
|
||||
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
|
||||
min_t(unsigned int, rq->inputsize,
|
||||
EROFS_BLKSIZ - rq->pageofs_in));
|
||||
min_t(unsigned int, rq->inputsize,
|
||||
rq->sb->s_blocksize - rq->pageofs_in));
|
||||
if (err) {
|
||||
kunmap(*rq->in);
|
||||
return err;
|
||||
|
|
|
@ -50,44 +50,43 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
|
|||
{
|
||||
struct inode *dir = file_inode(f);
|
||||
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
|
||||
struct super_block *sb = dir->i_sb;
|
||||
unsigned long bsz = sb->s_blocksize;
|
||||
const size_t dirsize = i_size_read(dir);
|
||||
unsigned int i = ctx->pos / EROFS_BLKSIZ;
|
||||
unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
|
||||
unsigned int i = erofs_blknr(sb, ctx->pos);
|
||||
unsigned int ofs = erofs_blkoff(sb, ctx->pos);
|
||||
int err = 0;
|
||||
bool initial = true;
|
||||
|
||||
buf.inode = dir;
|
||||
while (ctx->pos < dirsize) {
|
||||
struct erofs_dirent *de;
|
||||
unsigned int nameoff, maxsize;
|
||||
|
||||
de = erofs_bread(&buf, dir, i, EROFS_KMAP);
|
||||
de = erofs_bread(&buf, i, EROFS_KMAP);
|
||||
if (IS_ERR(de)) {
|
||||
erofs_err(dir->i_sb,
|
||||
"fail to readdir of logical block %u of nid %llu",
|
||||
erofs_err(sb, "fail to readdir of logical block %u of nid %llu",
|
||||
i, EROFS_I(dir)->nid);
|
||||
err = PTR_ERR(de);
|
||||
break;
|
||||
}
|
||||
|
||||
nameoff = le16_to_cpu(de->nameoff);
|
||||
if (nameoff < sizeof(struct erofs_dirent) ||
|
||||
nameoff >= EROFS_BLKSIZ) {
|
||||
erofs_err(dir->i_sb,
|
||||
"invalid de[0].nameoff %u @ nid %llu",
|
||||
if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) {
|
||||
erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu",
|
||||
nameoff, EROFS_I(dir)->nid);
|
||||
err = -EFSCORRUPTED;
|
||||
break;
|
||||
}
|
||||
|
||||
maxsize = min_t(unsigned int,
|
||||
dirsize - ctx->pos + ofs, EROFS_BLKSIZ);
|
||||
maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz);
|
||||
|
||||
/* search dirents at the arbitrary position */
|
||||
if (initial) {
|
||||
initial = false;
|
||||
|
||||
ofs = roundup(ofs, sizeof(struct erofs_dirent));
|
||||
ctx->pos = blknr_to_addr(i) + ofs;
|
||||
ctx->pos = erofs_pos(sb, i) + ofs;
|
||||
if (ofs >= nameoff)
|
||||
goto skip_this;
|
||||
}
|
||||
|
@ -97,7 +96,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
|
|||
if (err)
|
||||
break;
|
||||
skip_this:
|
||||
ctx->pos = blknr_to_addr(i) + maxsize;
|
||||
ctx->pos = erofs_pos(sb, i) + maxsize;
|
||||
++i;
|
||||
ofs = 0;
|
||||
}
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#define EROFS_FEATURE_INCOMPAT_ZTAILPACKING 0x00000010
|
||||
#define EROFS_FEATURE_INCOMPAT_FRAGMENTS 0x00000020
|
||||
#define EROFS_FEATURE_INCOMPAT_DEDUPE 0x00000020
|
||||
#define EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES 0x00000040
|
||||
#define EROFS_ALL_FEATURE_INCOMPAT \
|
||||
(EROFS_FEATURE_INCOMPAT_ZERO_PADDING | \
|
||||
EROFS_FEATURE_INCOMPAT_COMPR_CFGS | \
|
||||
|
@ -36,7 +37,8 @@
|
|||
EROFS_FEATURE_INCOMPAT_COMPR_HEAD2 | \
|
||||
EROFS_FEATURE_INCOMPAT_ZTAILPACKING | \
|
||||
EROFS_FEATURE_INCOMPAT_FRAGMENTS | \
|
||||
EROFS_FEATURE_INCOMPAT_DEDUPE)
|
||||
EROFS_FEATURE_INCOMPAT_DEDUPE | \
|
||||
EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES)
|
||||
|
||||
#define EROFS_SB_EXTSLOT_SIZE 16
|
||||
|
||||
|
@ -53,7 +55,7 @@ struct erofs_super_block {
|
|||
__le32 magic; /* file system magic number */
|
||||
__le32 checksum; /* crc32c(super_block) */
|
||||
__le32 feature_compat;
|
||||
__u8 blkszbits; /* support block_size == PAGE_SIZE only */
|
||||
__u8 blkszbits; /* filesystem block size in bit shift */
|
||||
__u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */
|
||||
|
||||
__le16 root_nid; /* nid of root directory */
|
||||
|
@ -75,49 +77,46 @@ struct erofs_super_block {
|
|||
} __packed u1;
|
||||
__le16 extra_devices; /* # of devices besides the primary device */
|
||||
__le16 devt_slotoff; /* startoff = devt_slotoff * devt_slotsize */
|
||||
__u8 reserved[6];
|
||||
__u8 dirblkbits; /* directory block size in bit shift */
|
||||
__u8 xattr_prefix_count; /* # of long xattr name prefixes */
|
||||
__le32 xattr_prefix_start; /* start of long xattr prefixes */
|
||||
__le64 packed_nid; /* nid of the special packed inode */
|
||||
__u8 reserved2[24];
|
||||
};
|
||||
|
||||
/*
|
||||
* erofs inode datalayout (i_format in on-disk inode):
|
||||
* EROFS inode datalayout (i_format in on-disk inode):
|
||||
* 0 - uncompressed flat inode without tail-packing inline data:
|
||||
* inode, [xattrs], ... | ... | no-holed data
|
||||
* 1 - compressed inode with non-compact indexes:
|
||||
* inode, [xattrs], [map_header], extents ... | ...
|
||||
* 2 - uncompressed flat inode with tail-packing inline data:
|
||||
* inode, [xattrs], tailpacking data, ... | ... | no-holed data
|
||||
* 3 - compressed inode with compact indexes:
|
||||
* inode, [xattrs], map_header, extents ... | ...
|
||||
* 4 - chunk-based inode with (optional) multi-device support:
|
||||
* inode, [xattrs], chunk indexes ... | ...
|
||||
* 5~7 - reserved
|
||||
*/
|
||||
enum {
|
||||
EROFS_INODE_FLAT_PLAIN = 0,
|
||||
EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1,
|
||||
EROFS_INODE_COMPRESSED_FULL = 1,
|
||||
EROFS_INODE_FLAT_INLINE = 2,
|
||||
EROFS_INODE_FLAT_COMPRESSION = 3,
|
||||
EROFS_INODE_COMPRESSED_COMPACT = 3,
|
||||
EROFS_INODE_CHUNK_BASED = 4,
|
||||
EROFS_INODE_DATALAYOUT_MAX
|
||||
};
|
||||
|
||||
static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
|
||||
{
|
||||
return datamode == EROFS_INODE_FLAT_COMPRESSION ||
|
||||
datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
|
||||
return datamode == EROFS_INODE_COMPRESSED_COMPACT ||
|
||||
datamode == EROFS_INODE_COMPRESSED_FULL;
|
||||
}
|
||||
|
||||
/* bit definitions of inode i_format */
|
||||
#define EROFS_I_VERSION_BITS 1
|
||||
#define EROFS_I_DATALAYOUT_BITS 3
|
||||
#define EROFS_I_VERSION_MASK 0x01
|
||||
#define EROFS_I_DATALAYOUT_MASK 0x07
|
||||
|
||||
#define EROFS_I_VERSION_BIT 0
|
||||
#define EROFS_I_DATALAYOUT_BIT 1
|
||||
#define EROFS_I_ALL_BIT 4
|
||||
|
||||
#define EROFS_I_ALL \
|
||||
((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
|
||||
#define EROFS_I_ALL ((1 << EROFS_I_ALL_BIT) - 1)
|
||||
|
||||
/* indicate chunk blkbits, thus 'chunksize = blocksize << chunk blkbits' */
|
||||
#define EROFS_CHUNK_FORMAT_BLKBITS_MASK 0x001F
|
||||
|
@ -127,11 +126,30 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
|
|||
#define EROFS_CHUNK_FORMAT_ALL \
|
||||
(EROFS_CHUNK_FORMAT_BLKBITS_MASK | EROFS_CHUNK_FORMAT_INDEXES)
|
||||
|
||||
/* 32-byte on-disk inode */
|
||||
#define EROFS_INODE_LAYOUT_COMPACT 0
|
||||
/* 64-byte on-disk inode */
|
||||
#define EROFS_INODE_LAYOUT_EXTENDED 1
|
||||
|
||||
struct erofs_inode_chunk_info {
|
||||
__le16 format; /* chunk blkbits, etc. */
|
||||
__le16 reserved;
|
||||
};
|
||||
|
||||
union erofs_inode_i_u {
|
||||
/* total compressed blocks for compressed inodes */
|
||||
__le32 compressed_blocks;
|
||||
|
||||
/* block address for uncompressed flat inodes */
|
||||
__le32 raw_blkaddr;
|
||||
|
||||
/* for device files, used to indicate old/new device # */
|
||||
__le32 rdev;
|
||||
|
||||
/* for chunk-based files, it contains the summary info */
|
||||
struct erofs_inode_chunk_info c;
|
||||
};
|
||||
|
||||
/* 32-byte reduced form of an ondisk inode */
|
||||
struct erofs_inode_compact {
|
||||
__le16 i_format; /* inode format hints */
|
||||
|
@ -142,29 +160,14 @@ struct erofs_inode_compact {
|
|||
__le16 i_nlink;
|
||||
__le32 i_size;
|
||||
__le32 i_reserved;
|
||||
union {
|
||||
/* total compressed blocks for compressed inodes */
|
||||
__le32 compressed_blocks;
|
||||
/* block address for uncompressed flat inodes */
|
||||
__le32 raw_blkaddr;
|
||||
union erofs_inode_i_u i_u;
|
||||
|
||||
/* for device files, used to indicate old/new device # */
|
||||
__le32 rdev;
|
||||
|
||||
/* for chunk-based files, it contains the summary info */
|
||||
struct erofs_inode_chunk_info c;
|
||||
} i_u;
|
||||
__le32 i_ino; /* only used for 32-bit stat compatibility */
|
||||
__le32 i_ino; /* only used for 32-bit stat compatibility */
|
||||
__le16 i_uid;
|
||||
__le16 i_gid;
|
||||
__le32 i_reserved2;
|
||||
};
|
||||
|
||||
/* 32-byte on-disk inode */
|
||||
#define EROFS_INODE_LAYOUT_COMPACT 0
|
||||
/* 64-byte on-disk inode */
|
||||
#define EROFS_INODE_LAYOUT_EXTENDED 1
|
||||
|
||||
/* 64-byte complete form of an ondisk inode */
|
||||
struct erofs_inode_extended {
|
||||
__le16 i_format; /* inode format hints */
|
||||
|
@ -174,22 +177,9 @@ struct erofs_inode_extended {
|
|||
__le16 i_mode;
|
||||
__le16 i_reserved;
|
||||
__le64 i_size;
|
||||
union {
|
||||
/* total compressed blocks for compressed inodes */
|
||||
__le32 compressed_blocks;
|
||||
/* block address for uncompressed flat inodes */
|
||||
__le32 raw_blkaddr;
|
||||
|
||||
/* for device files, used to indicate old/new device # */
|
||||
__le32 rdev;
|
||||
|
||||
/* for chunk-based files, it contains the summary info */
|
||||
struct erofs_inode_chunk_info c;
|
||||
} i_u;
|
||||
|
||||
/* only used for 32-bit stat compatibility */
|
||||
__le32 i_ino;
|
||||
union erofs_inode_i_u i_u;
|
||||
|
||||
__le32 i_ino; /* only used for 32-bit stat compatibility */
|
||||
__le32 i_uid;
|
||||
__le32 i_gid;
|
||||
__le64 i_mtime;
|
||||
|
@ -198,10 +188,6 @@ struct erofs_inode_extended {
|
|||
__u8 i_reserved2[16];
|
||||
};
|
||||
|
||||
#define EROFS_MAX_SHARED_XATTRS (128)
|
||||
/* h_shared_count between 129 ... 255 are special # */
|
||||
#define EROFS_SHARED_XATTR_EXTENT (255)
|
||||
|
||||
/*
|
||||
* inline xattrs (n == i_xattr_icount):
|
||||
* erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
|
||||
|
@ -228,6 +214,13 @@ struct erofs_xattr_ibody_header {
|
|||
#define EROFS_XATTR_INDEX_LUSTRE 5
|
||||
#define EROFS_XATTR_INDEX_SECURITY 6
|
||||
|
||||
/*
|
||||
* bit 7 of e_name_index is set when it refers to a long xattr name prefix,
|
||||
* while the remained lower bits represent the index of the prefix.
|
||||
*/
|
||||
#define EROFS_XATTR_LONG_PREFIX 0x80
|
||||
#define EROFS_XATTR_LONG_PREFIX_MASK 0x7f
|
||||
|
||||
/* xattr entry (for both inline & shared xattrs) */
|
||||
struct erofs_xattr_entry {
|
||||
__u8 e_name_len; /* length of name */
|
||||
|
@ -237,6 +230,12 @@ struct erofs_xattr_entry {
|
|||
char e_name[]; /* attribute name */
|
||||
};
|
||||
|
||||
/* long xattr name prefix */
|
||||
struct erofs_xattr_long_prefix {
|
||||
__u8 base_index; /* short xattr name prefix index */
|
||||
char infix[]; /* infix apart from short prefix */
|
||||
};
|
||||
|
||||
static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
|
||||
{
|
||||
if (!i_xattr_icount)
|
||||
|
@ -267,6 +266,22 @@ struct erofs_inode_chunk_index {
|
|||
__le32 blkaddr; /* start block address of this inode chunk */
|
||||
};
|
||||
|
||||
/* dirent sorts in alphabet order, thus we can do binary search */
|
||||
struct erofs_dirent {
|
||||
__le64 nid; /* node number */
|
||||
__le16 nameoff; /* start offset of file name */
|
||||
__u8 file_type; /* file type */
|
||||
__u8 reserved; /* reserved */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* EROFS file types should match generic FT_* types and
|
||||
* it seems no need to add BUILD_BUG_ONs since potential
|
||||
* unmatchness will break other fses as well...
|
||||
*/
|
||||
|
||||
#define EROFS_NAME_LEN 255
|
||||
|
||||
/* maximum supported size of a physical compression cluster */
|
||||
#define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024)
|
||||
|
||||
|
@ -336,10 +351,8 @@ struct z_erofs_map_header {
|
|||
__u8 h_clusterbits;
|
||||
};
|
||||
|
||||
#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
|
||||
|
||||
/*
|
||||
* Fixed-sized output compression on-disk logical cluster type:
|
||||
* On-disk logical cluster type:
|
||||
* 0 - literal (uncompressed) lcluster
|
||||
* 1,3 - compressed lcluster (for HEAD lclusters)
|
||||
* 2 - compressed lcluster (for NONHEAD lclusters)
|
||||
|
@ -363,27 +376,27 @@ struct z_erofs_map_header {
|
|||
* di_u.delta[1] = distance to the next HEAD lcluster
|
||||
*/
|
||||
enum {
|
||||
Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0,
|
||||
Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 = 1,
|
||||
Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2,
|
||||
Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 = 3,
|
||||
Z_EROFS_VLE_CLUSTER_TYPE_MAX
|
||||
Z_EROFS_LCLUSTER_TYPE_PLAIN = 0,
|
||||
Z_EROFS_LCLUSTER_TYPE_HEAD1 = 1,
|
||||
Z_EROFS_LCLUSTER_TYPE_NONHEAD = 2,
|
||||
Z_EROFS_LCLUSTER_TYPE_HEAD2 = 3,
|
||||
Z_EROFS_LCLUSTER_TYPE_MAX
|
||||
};
|
||||
|
||||
#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
|
||||
#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
|
||||
#define Z_EROFS_LI_LCLUSTER_TYPE_BITS 2
|
||||
#define Z_EROFS_LI_LCLUSTER_TYPE_BIT 0
|
||||
|
||||
/* (noncompact only, HEAD) This pcluster refers to partial decompressed data */
|
||||
#define Z_EROFS_VLE_DI_PARTIAL_REF (1 << 15)
|
||||
#define Z_EROFS_LI_PARTIAL_REF (1 << 15)
|
||||
|
||||
/*
|
||||
* D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the
|
||||
* compressed block count of a compressed extent (in logical clusters, aka.
|
||||
* block count of a pcluster).
|
||||
*/
|
||||
#define Z_EROFS_VLE_DI_D0_CBLKCNT (1 << 11)
|
||||
#define Z_EROFS_LI_D0_CBLKCNT (1 << 11)
|
||||
|
||||
struct z_erofs_vle_decompressed_index {
|
||||
struct z_erofs_lcluster_index {
|
||||
__le16 di_advise;
|
||||
/* where to decompress in the head lcluster */
|
||||
__le16 di_clusterofs;
|
||||
|
@ -400,25 +413,8 @@ struct z_erofs_vle_decompressed_index {
|
|||
} di_u;
|
||||
};
|
||||
|
||||
#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
|
||||
(round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
|
||||
sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
|
||||
|
||||
/* dirent sorts in alphabet order, thus we can do binary search */
|
||||
struct erofs_dirent {
|
||||
__le64 nid; /* node number */
|
||||
__le16 nameoff; /* start offset of file name */
|
||||
__u8 file_type; /* file type */
|
||||
__u8 reserved; /* reserved */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* EROFS file types should match generic FT_* types and
|
||||
* it seems no need to add BUILD_BUG_ONs since potential
|
||||
* unmatchness will break other fses as well...
|
||||
*/
|
||||
|
||||
#define EROFS_NAME_LEN 255
|
||||
#define Z_EROFS_FULL_INDEX_ALIGN(end) \
|
||||
(ALIGN(end, 8) + sizeof(struct z_erofs_map_header) + 8)
|
||||
|
||||
/* check the EROFS on-disk layout strictly at compile time */
|
||||
static inline void erofs_check_ondisk_layout_definitions(void)
|
||||
|
@ -435,15 +431,15 @@ static inline void erofs_check_ondisk_layout_definitions(void)
|
|||
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_info) != 4);
|
||||
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) != 8);
|
||||
BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
|
||||
BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
|
||||
BUILD_BUG_ON(sizeof(struct z_erofs_lcluster_index) != 8);
|
||||
BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
|
||||
/* keep in sync between 2 index structures for better extendibility */
|
||||
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) !=
|
||||
sizeof(struct z_erofs_vle_decompressed_index));
|
||||
sizeof(struct z_erofs_lcluster_index));
|
||||
BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128);
|
||||
|
||||
BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
|
||||
Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
|
||||
BUILD_BUG_ON(BIT(Z_EROFS_LI_LCLUSTER_TYPE_BITS) <
|
||||
Z_EROFS_LCLUSTER_TYPE_MAX - 1);
|
||||
/* exclude old compiler versions like gcc 7.5.0 */
|
||||
BUILD_BUG_ON(__builtin_constant_p(fmh) ?
|
||||
fmh != cpu_to_le64(1ULL << 63) : 0);
|
||||
|
|
|
@ -209,8 +209,8 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary)
|
|||
void *src;
|
||||
|
||||
/* For tail packing layout, the offset may be non-zero. */
|
||||
offset = erofs_blkoff(map.m_pa);
|
||||
blknr = erofs_blknr(map.m_pa);
|
||||
offset = erofs_blkoff(sb, map.m_pa);
|
||||
blknr = erofs_blknr(sb, map.m_pa);
|
||||
size = map.m_llen;
|
||||
|
||||
src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
|
||||
|
@ -460,6 +460,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb
|
|||
inode->i_size = OFFSET_MAX;
|
||||
inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
|
||||
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
|
||||
inode->i_blkbits = EROFS_SB(sb)->blkszbits;
|
||||
inode->i_private = ctx;
|
||||
|
||||
ctx->cookie = cookie;
|
||||
|
|
|
@ -23,11 +23,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
|
|||
unsigned int ifmt;
|
||||
int err;
|
||||
|
||||
blkaddr = erofs_blknr(inode_loc);
|
||||
*ofs = erofs_blkoff(inode_loc);
|
||||
|
||||
erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
|
||||
__func__, vi->nid, *ofs, blkaddr);
|
||||
blkaddr = erofs_blknr(sb, inode_loc);
|
||||
*ofs = erofs_blkoff(sb, inode_loc);
|
||||
|
||||
kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
|
||||
if (IS_ERR(kaddr)) {
|
||||
|
@ -58,11 +55,11 @@ static void *erofs_read_inode(struct erofs_buf *buf,
|
|||
case EROFS_INODE_LAYOUT_EXTENDED:
|
||||
vi->inode_isize = sizeof(struct erofs_inode_extended);
|
||||
/* check if the extended inode acrosses block boundary */
|
||||
if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
|
||||
if (*ofs + vi->inode_isize <= sb->s_blocksize) {
|
||||
*ofs += vi->inode_isize;
|
||||
die = (struct erofs_inode_extended *)dic;
|
||||
} else {
|
||||
const unsigned int gotten = EROFS_BLKSIZ - *ofs;
|
||||
const unsigned int gotten = sb->s_blocksize - *ofs;
|
||||
|
||||
copied = kmalloc(vi->inode_isize, GFP_NOFS);
|
||||
if (!copied) {
|
||||
|
@ -176,7 +173,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
|
|||
err = -EOPNOTSUPP;
|
||||
goto err_out;
|
||||
}
|
||||
vi->chunkbits = LOG_BLOCK_SIZE +
|
||||
vi->chunkbits = sb->s_blocksize_bits +
|
||||
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
|
||||
}
|
||||
inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
|
||||
|
@ -188,11 +185,12 @@ static void *erofs_read_inode(struct erofs_buf *buf,
|
|||
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
|
||||
vi->datalayout == EROFS_INODE_FLAT_PLAIN)
|
||||
inode->i_flags |= S_DAX;
|
||||
|
||||
if (!nblks)
|
||||
/* measure inode.i_blocks as generic filesystems */
|
||||
inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
|
||||
inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
|
||||
else
|
||||
inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
|
||||
inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
|
||||
return kaddr;
|
||||
|
||||
bogusimode:
|
||||
|
@ -210,11 +208,12 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
|
|||
unsigned int m_pofs)
|
||||
{
|
||||
struct erofs_inode *vi = EROFS_I(inode);
|
||||
unsigned int bsz = i_blocksize(inode);
|
||||
char *lnk;
|
||||
|
||||
/* if it cannot be handled with fast symlink scheme */
|
||||
if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
|
||||
inode->i_size >= EROFS_BLKSIZ || inode->i_size < 0) {
|
||||
inode->i_size >= bsz || inode->i_size < 0) {
|
||||
inode->i_op = &erofs_symlink_iops;
|
||||
return 0;
|
||||
}
|
||||
|
@ -225,7 +224,7 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
|
|||
|
||||
m_pofs += vi->xattr_isize;
|
||||
/* inline symlink data shouldn't cross block boundary */
|
||||
if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
|
||||
if (m_pofs + inode->i_size > bsz) {
|
||||
kfree(lnk);
|
||||
erofs_err(inode->i_sb,
|
||||
"inline data cross block boundary @ nid %llu",
|
||||
|
@ -289,10 +288,15 @@ static int erofs_fill_inode(struct inode *inode)
|
|||
}
|
||||
|
||||
if (erofs_inode_is_data_compressed(vi->datalayout)) {
|
||||
if (!erofs_is_fscache_mode(inode->i_sb))
|
||||
err = z_erofs_fill_inode(inode);
|
||||
else
|
||||
err = -EOPNOTSUPP;
|
||||
#ifdef CONFIG_EROFS_FS_ZIP
|
||||
if (!erofs_is_fscache_mode(inode->i_sb) &&
|
||||
inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
|
||||
inode->i_mapping->a_ops = &z_erofs_aops;
|
||||
err = 0;
|
||||
goto out_unlock;
|
||||
}
|
||||
#endif
|
||||
err = -EOPNOTSUPP;
|
||||
goto out_unlock;
|
||||
}
|
||||
inode->i_mapping->a_ops = &erofs_raw_access_aops;
|
||||
|
|
|
@ -31,10 +31,8 @@ __printf(3, 4) void _erofs_info(struct super_block *sb,
|
|||
#define erofs_info(sb, fmt, ...) \
|
||||
_erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
|
||||
#ifdef CONFIG_EROFS_FS_DEBUG
|
||||
#define erofs_dbg(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
|
||||
#define DBG_BUGON BUG_ON
|
||||
#else
|
||||
#define erofs_dbg(x, ...) ((void)0)
|
||||
#define DBG_BUGON(x) ((void)(x))
|
||||
#endif /* !CONFIG_EROFS_FS_DEBUG */
|
||||
|
||||
|
@ -81,6 +79,7 @@ struct erofs_dev_context {
|
|||
struct rw_semaphore rwsem;
|
||||
|
||||
unsigned int extra_devices;
|
||||
bool flatdev;
|
||||
};
|
||||
|
||||
struct erofs_fs_context {
|
||||
|
@ -116,6 +115,11 @@ struct erofs_fscache {
|
|||
char *name;
|
||||
};
|
||||
|
||||
struct erofs_xattr_prefix_item {
|
||||
struct erofs_xattr_long_prefix *prefix;
|
||||
u8 infix_len;
|
||||
};
|
||||
|
||||
struct erofs_sb_info {
|
||||
struct erofs_mount_opts opt; /* options */
|
||||
#ifdef CONFIG_EROFS_FS_ZIP
|
||||
|
@ -133,8 +137,8 @@ struct erofs_sb_info {
|
|||
struct inode *managed_cache;
|
||||
|
||||
struct erofs_sb_lz4_info lz4;
|
||||
struct inode *packed_inode;
|
||||
#endif /* CONFIG_EROFS_FS_ZIP */
|
||||
struct inode *packed_inode;
|
||||
struct erofs_dev_context *devs;
|
||||
struct dax_device *dax_dev;
|
||||
u64 dax_part_off;
|
||||
|
@ -144,11 +148,14 @@ struct erofs_sb_info {
|
|||
u32 meta_blkaddr;
|
||||
#ifdef CONFIG_EROFS_FS_XATTR
|
||||
u32 xattr_blkaddr;
|
||||
u32 xattr_prefix_start;
|
||||
u8 xattr_prefix_count;
|
||||
struct erofs_xattr_prefix_item *xattr_prefixes;
|
||||
#endif
|
||||
u16 device_id_mask; /* valid bits of device id to be used */
|
||||
|
||||
/* inode slot unit size in bit shift */
|
||||
unsigned char islotbits;
|
||||
unsigned char islotbits; /* inode slot unit size in bit shift */
|
||||
unsigned char blkszbits; /* filesystem block size in bit shift */
|
||||
|
||||
u32 sb_size; /* total superblock size */
|
||||
u32 build_time_nsec;
|
||||
|
@ -156,6 +163,7 @@ struct erofs_sb_info {
|
|||
|
||||
/* what we really care is nid, rather than ino.. */
|
||||
erofs_nid_t root_nid;
|
||||
erofs_nid_t packed_nid;
|
||||
/* used for statfs, f_files - f_favail */
|
||||
u64 inos;
|
||||
|
||||
|
@ -240,27 +248,13 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
|
|||
VAL != EROFS_LOCKED_MAGIC);
|
||||
}
|
||||
|
||||
/* we strictly follow PAGE_SIZE and no buffer head yet */
|
||||
#define LOG_BLOCK_SIZE PAGE_SHIFT
|
||||
|
||||
#undef LOG_SECTORS_PER_BLOCK
|
||||
#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
|
||||
|
||||
#undef SECTORS_PER_BLOCK
|
||||
#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK)
|
||||
|
||||
#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
|
||||
|
||||
#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
|
||||
#error erofs cannot be used in this platform
|
||||
#endif
|
||||
|
||||
enum erofs_kmap_type {
|
||||
EROFS_NO_KMAP, /* don't map the buffer */
|
||||
EROFS_KMAP, /* use kmap_local_page() to map the buffer */
|
||||
};
|
||||
|
||||
struct erofs_buf {
|
||||
struct inode *inode;
|
||||
struct page *page;
|
||||
void *base;
|
||||
enum erofs_kmap_type kmap_type;
|
||||
|
@ -269,9 +263,10 @@ struct erofs_buf {
|
|||
|
||||
#define ROOT_NID(sb) ((sb)->root_nid)
|
||||
|
||||
#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
|
||||
#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
|
||||
#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
|
||||
#define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits)
|
||||
#define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1))
|
||||
#define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
|
||||
#define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
|
||||
|
||||
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
|
||||
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
|
||||
|
@ -288,6 +283,7 @@ EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2)
|
|||
EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
|
||||
EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
|
||||
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
|
||||
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
|
||||
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
|
||||
|
||||
/* atomic flag definitions */
|
||||
|
@ -306,7 +302,7 @@ struct erofs_inode {
|
|||
|
||||
unsigned char datalayout;
|
||||
unsigned char inode_isize;
|
||||
unsigned short xattr_isize;
|
||||
unsigned int xattr_isize;
|
||||
|
||||
unsigned int xattr_shared_count;
|
||||
unsigned int *xattr_shared_xattrs;
|
||||
|
@ -343,28 +339,18 @@ static inline erofs_off_t erofs_iloc(struct inode *inode)
|
|||
{
|
||||
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
|
||||
|
||||
return blknr_to_addr(sbi->meta_blkaddr) +
|
||||
return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
|
||||
(EROFS_I(inode)->nid << sbi->islotbits);
|
||||
}
|
||||
|
||||
static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
|
||||
unsigned int bits)
|
||||
static inline unsigned int erofs_inode_version(unsigned int ifmt)
|
||||
{
|
||||
|
||||
return (value >> bit) & ((1 << bits) - 1);
|
||||
return (ifmt >> EROFS_I_VERSION_BIT) & EROFS_I_VERSION_MASK;
|
||||
}
|
||||
|
||||
|
||||
static inline unsigned int erofs_inode_version(unsigned int value)
|
||||
static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
|
||||
{
|
||||
return erofs_bitrange(value, EROFS_I_VERSION_BIT,
|
||||
EROFS_I_VERSION_BITS);
|
||||
}
|
||||
|
||||
static inline unsigned int erofs_inode_datalayout(unsigned int value)
|
||||
{
|
||||
return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
|
||||
EROFS_I_DATALAYOUT_BITS);
|
||||
return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -451,10 +437,13 @@ extern const struct iomap_ops z_erofs_iomap_report_ops;
|
|||
#define EROFS_REG_COOKIE_SHARE 0x0001
|
||||
#define EROFS_REG_COOKIE_NEED_NOEXIST 0x0002
|
||||
|
||||
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
|
||||
erofs_off_t *offset, int *lengthp);
|
||||
void erofs_unmap_metabuf(struct erofs_buf *buf);
|
||||
void erofs_put_metabuf(struct erofs_buf *buf);
|
||||
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
|
||||
erofs_blk_t blkaddr, enum erofs_kmap_type type);
|
||||
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
|
||||
enum erofs_kmap_type type);
|
||||
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
|
||||
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
|
||||
erofs_blk_t blkaddr, enum erofs_kmap_type type);
|
||||
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
|
||||
|
@ -521,7 +510,6 @@ int erofs_try_to_free_cached_page(struct page *page);
|
|||
int z_erofs_load_lz4_config(struct super_block *sb,
|
||||
struct erofs_super_block *dsb,
|
||||
struct z_erofs_lz4_cfgs *lz4, int len);
|
||||
int z_erofs_fill_inode(struct inode *inode);
|
||||
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
|
||||
int flags);
|
||||
#else
|
||||
|
@ -541,7 +529,6 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
|
||||
#endif /* !CONFIG_EROFS_FS_ZIP */
|
||||
|
||||
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
|
||||
|
|
|
@ -89,7 +89,8 @@ static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
|
|||
static void *erofs_find_target_block(struct erofs_buf *target,
|
||||
struct inode *dir, struct erofs_qstr *name, int *_ndirents)
|
||||
{
|
||||
int head = 0, back = DIV_ROUND_UP(dir->i_size, EROFS_BLKSIZ) - 1;
|
||||
unsigned int bsz = i_blocksize(dir);
|
||||
int head = 0, back = erofs_iblks(dir) - 1;
|
||||
unsigned int startprfx = 0, endprfx = 0;
|
||||
void *candidate = ERR_PTR(-ENOENT);
|
||||
|
||||
|
@ -98,10 +99,10 @@ static void *erofs_find_target_block(struct erofs_buf *target,
|
|||
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
|
||||
struct erofs_dirent *de;
|
||||
|
||||
de = erofs_bread(&buf, dir, mid, EROFS_KMAP);
|
||||
buf.inode = dir;
|
||||
de = erofs_bread(&buf, mid, EROFS_KMAP);
|
||||
if (!IS_ERR(de)) {
|
||||
const int nameoff = nameoff_from_disk(de->nameoff,
|
||||
EROFS_BLKSIZ);
|
||||
const int nameoff = nameoff_from_disk(de->nameoff, bsz);
|
||||
const int ndirents = nameoff / sizeof(*de);
|
||||
int diff;
|
||||
unsigned int matched;
|
||||
|
@ -121,11 +122,10 @@ static void *erofs_find_target_block(struct erofs_buf *target,
|
|||
|
||||
dname.name = (u8 *)de + nameoff;
|
||||
if (ndirents == 1)
|
||||
dname.end = (u8 *)de + EROFS_BLKSIZ;
|
||||
dname.end = (u8 *)de + bsz;
|
||||
else
|
||||
dname.end = (u8 *)de +
|
||||
nameoff_from_disk(de[1].nameoff,
|
||||
EROFS_BLKSIZ);
|
||||
nameoff_from_disk(de[1].nameoff, bsz);
|
||||
|
||||
/* string comparison without already matched prefix */
|
||||
diff = erofs_dirnamecmp(name, &dname, &matched);
|
||||
|
@ -171,6 +171,7 @@ int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,
|
|||
|
||||
qn.name = name->name;
|
||||
qn.end = name->name + name->len;
|
||||
buf.inode = dir;
|
||||
|
||||
ndirents = 0;
|
||||
de = erofs_find_target_block(&buf, dir, &qn, &ndirents);
|
||||
|
@ -178,7 +179,8 @@ int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,
|
|||
return PTR_ERR(de);
|
||||
|
||||
if (ndirents)
|
||||
de = find_target_dirent(&qn, (u8 *)de, EROFS_BLKSIZ, ndirents);
|
||||
de = find_target_dirent(&qn, (u8 *)de, i_blocksize(dir),
|
||||
ndirents);
|
||||
|
||||
if (!IS_ERR(de)) {
|
||||
*nid = le64_to_cpu(de->nid);
|
||||
|
@ -203,16 +205,13 @@ static struct dentry *erofs_lookup(struct inode *dir, struct dentry *dentry,
|
|||
|
||||
err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
|
||||
|
||||
if (err == -ENOENT) {
|
||||
if (err == -ENOENT)
|
||||
/* negative dentry */
|
||||
inode = NULL;
|
||||
} else if (err) {
|
||||
else if (err)
|
||||
inode = ERR_PTR(err);
|
||||
} else {
|
||||
erofs_dbg("%s, %pd (nid %llu) found, d_type %u", __func__,
|
||||
dentry, nid, d_type);
|
||||
else
|
||||
inode = erofs_iget(dir->i_sb, nid);
|
||||
}
|
||||
return d_splice_alias(inode, dentry);
|
||||
}
|
||||
|
||||
|
|
 fs/erofs/super.c | 116
|
@ -52,18 +52,21 @@ void _erofs_info(struct super_block *sb, const char *function,
|
|||
|
||||
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
|
||||
{
|
||||
size_t len = 1 << EROFS_SB(sb)->blkszbits;
|
||||
struct erofs_super_block *dsb;
|
||||
u32 expected_crc, crc;
|
||||
|
||||
dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
|
||||
EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
|
||||
if (len > EROFS_SUPER_OFFSET)
|
||||
len -= EROFS_SUPER_OFFSET;
|
||||
|
||||
dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
|
||||
if (!dsb)
|
||||
return -ENOMEM;
|
||||
|
||||
expected_crc = le32_to_cpu(dsb->checksum);
|
||||
dsb->checksum = 0;
|
||||
/* to allow for x86 boot sectors and other oddities. */
|
||||
crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
|
||||
crc = crc32c(~0, dsb, len);
|
||||
kfree(dsb);
|
||||
|
||||
if (crc != expected_crc) {
|
||||
|
@ -123,20 +126,19 @@ static bool check_layout_compatibility(struct super_block *sb,
|
|||
return true;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_EROFS_FS_ZIP
|
||||
/* read variable-sized metadata, offset will be aligned by 4-byte */
|
||||
static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
|
||||
erofs_off_t *offset, int *lengthp)
|
||||
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
|
||||
erofs_off_t *offset, int *lengthp)
|
||||
{
|
||||
u8 *buffer, *ptr;
|
||||
int len, i, cnt;
|
||||
|
||||
*offset = round_up(*offset, 4);
|
||||
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), EROFS_KMAP);
|
||||
ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
|
||||
if (IS_ERR(ptr))
|
||||
return ptr;
|
||||
|
||||
len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
|
||||
len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
|
||||
if (!len)
|
||||
len = U16_MAX + 1;
|
||||
buffer = kmalloc(len, GFP_KERNEL);
|
||||
|
@ -146,19 +148,20 @@ static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
|
|||
*lengthp = len;
|
||||
|
||||
for (i = 0; i < len; i += cnt) {
|
||||
cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
|
||||
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset),
|
||||
EROFS_KMAP);
|
||||
cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
|
||||
len - i);
|
||||
ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
|
||||
if (IS_ERR(ptr)) {
|
||||
kfree(buffer);
|
||||
return ptr;
|
||||
}
|
||||
memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
|
||||
memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
|
||||
*offset += cnt;
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_EROFS_FS_ZIP
|
||||
static int erofs_load_compr_cfgs(struct super_block *sb,
|
||||
struct erofs_super_block *dsb)
|
||||
{
|
||||
|
@ -175,6 +178,7 @@ static int erofs_load_compr_cfgs(struct super_block *sb,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
erofs_init_metabuf(&buf, sb);
|
||||
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
|
||||
alg = 0;
|
||||
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
|
||||
|
@ -228,10 +232,10 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
|
|||
struct block_device *bdev;
|
||||
void *ptr;
|
||||
|
||||
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*pos), EROFS_KMAP);
|
||||
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
|
||||
if (IS_ERR(ptr))
|
||||
return PTR_ERR(ptr);
|
||||
dis = ptr + erofs_blkoff(*pos);
|
||||
dis = ptr + erofs_blkoff(sb, *pos);
|
||||
|
||||
if (!dif->path) {
|
||||
if (!dis->tag[0]) {
|
||||
|
@ -248,7 +252,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
|
|||
if (IS_ERR(fscache))
|
||||
return PTR_ERR(fscache);
|
||||
dif->fscache = fscache;
|
||||
} else {
|
||||
} else if (!sbi->devs->flatdev) {
|
||||
bdev = blkdev_get_by_path(dif->path, FMODE_READ | FMODE_EXCL,
|
||||
sb->s_type);
|
||||
if (IS_ERR(bdev))
|
||||
|
@ -290,6 +294,9 @@ static int erofs_scan_devices(struct super_block *sb,
|
|||
if (!ondisk_extradevs)
|
||||
return 0;
|
||||
|
||||
if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
|
||||
sbi->devs->flatdev = true;
|
||||
|
||||
sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
|
||||
pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
|
||||
down_read(&sbi->devs->rwsem);
|
||||
|
@ -329,7 +336,6 @@ static int erofs_read_superblock(struct super_block *sb)
|
|||
struct erofs_sb_info *sbi;
|
||||
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
|
||||
struct erofs_super_block *dsb;
|
||||
unsigned int blkszbits;
|
||||
void *data;
|
||||
int ret;
|
||||
|
||||
|
@ -348,6 +354,16 @@ static int erofs_read_superblock(struct super_block *sb)
|
|||
goto out;
|
||||
}
|
||||
|
||||
sbi->blkszbits = dsb->blkszbits;
|
||||
if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
|
||||
erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
|
||||
goto out;
|
||||
}
|
||||
if (dsb->dirblkbits) {
|
||||
erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
|
||||
goto out;
|
||||
}
|
||||
|
||||
sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
|
||||
if (erofs_sb_has_sb_chksum(sbi)) {
|
||||
ret = erofs_superblock_csum_verify(sb, data);
|
||||
|
@ -356,19 +372,11 @@ static int erofs_read_superblock(struct super_block *sb)
|
|||
}
|
||||
|
||||
ret = -EINVAL;
|
||||
blkszbits = dsb->blkszbits;
|
||||
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
|
||||
if (blkszbits != LOG_BLOCK_SIZE) {
|
||||
erofs_err(sb, "blkszbits %u isn't supported on this platform",
|
||||
blkszbits);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!check_layout_compatibility(sb, dsb))
|
||||
goto out;
|
||||
|
||||
sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
|
||||
if (sbi->sb_size > EROFS_BLKSIZ) {
|
||||
if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
|
||||
erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
|
||||
sbi->sb_size);
|
||||
goto out;
|
||||
|
@ -377,20 +385,12 @@ static int erofs_read_superblock(struct super_block *sb)
|
|||
sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
|
||||
#ifdef CONFIG_EROFS_FS_XATTR
|
||||
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
|
||||
sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
|
||||
sbi->xattr_prefix_count = dsb->xattr_prefix_count;
|
||||
#endif
|
||||
sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
|
||||
sbi->root_nid = le16_to_cpu(dsb->root_nid);
|
||||
#ifdef CONFIG_EROFS_FS_ZIP
|
||||
sbi->packed_inode = NULL;
|
||||
if (erofs_sb_has_fragments(sbi) && dsb->packed_nid) {
|
||||
sbi->packed_inode =
|
||||
erofs_iget(sb, le64_to_cpu(dsb->packed_nid));
|
||||
if (IS_ERR(sbi->packed_inode)) {
|
||||
ret = PTR_ERR(sbi->packed_inode);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
|
||||
sbi->inos = le64_to_cpu(dsb->inos);
|
||||
|
||||
sbi->build_time = le64_to_cpu(dsb->build_time);
|
||||
|
@ -417,8 +417,6 @@ static int erofs_read_superblock(struct super_block *sb)
|
|||
/* handle multiple devices */
|
||||
ret = erofs_scan_devices(sb, dsb);
|
||||
|
||||
if (erofs_sb_has_ztailpacking(sbi))
|
||||
erofs_info(sb, "EXPERIMENTAL compressed inline data feature in use. Use at your own risk!");
|
||||
if (erofs_is_fscache_mode(sb))
|
||||
erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
|
||||
if (erofs_sb_has_fragments(sbi))
|
||||
|
@ -733,9 +731,10 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
sbi->domain_id = ctx->domain_id;
|
||||
ctx->domain_id = NULL;
|
||||
|
||||
sbi->blkszbits = PAGE_SHIFT;
|
||||
if (erofs_is_fscache_mode(sb)) {
|
||||
sb->s_blocksize = EROFS_BLKSIZ;
|
||||
sb->s_blocksize_bits = LOG_BLOCK_SIZE;
|
||||
sb->s_blocksize = PAGE_SIZE;
|
||||
sb->s_blocksize_bits = PAGE_SHIFT;
|
||||
|
||||
err = erofs_fscache_register_fs(sb);
|
||||
if (err)
|
||||
|
@ -745,8 +744,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
|
||||
erofs_err(sb, "failed to set erofs blksize");
|
||||
if (!sb_set_blocksize(sb, PAGE_SIZE)) {
|
||||
errorfc(fc, "failed to set initial blksize");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -759,12 +758,24 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
|
||||
BUILD_BUG_ON(EROFS_BLKSIZ != PAGE_SIZE);
|
||||
if (sb->s_blocksize_bits != sbi->blkszbits) {
|
||||
if (erofs_is_fscache_mode(sb)) {
|
||||
errorfc(fc, "unsupported blksize for fscache mode");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
|
||||
errorfc(fc, "failed to set erofs blksize");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
|
||||
if (!sbi->dax_dev) {
|
||||
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
|
||||
clear_opt(&sbi->opt, DAX_ALWAYS);
|
||||
} else if (sbi->blkszbits != PAGE_SHIFT) {
|
||||
errorfc(fc, "unsupported blocksize for DAX");
|
||||
clear_opt(&sbi->opt, DAX_ALWAYS);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -799,10 +810,22 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
|
|||
|
||||
erofs_shrinker_register(sb);
|
||||
/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
|
||||
if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
|
||||
sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
|
||||
if (IS_ERR(sbi->packed_inode)) {
|
||||
err = PTR_ERR(sbi->packed_inode);
|
||||
sbi->packed_inode = NULL;
|
||||
return err;
|
||||
}
|
||||
}
|
||||
err = erofs_init_managed_cache(sb);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = erofs_xattr_prefixes_init(sb);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = erofs_register_sysfs(sb);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -962,12 +985,13 @@ static void erofs_put_super(struct super_block *sb)
|
|||
|
||||
erofs_unregister_sysfs(sb);
|
||||
erofs_shrinker_unregister(sb);
|
||||
erofs_xattr_prefixes_cleanup(sb);
|
||||
#ifdef CONFIG_EROFS_FS_ZIP
|
||||
iput(sbi->managed_cache);
|
||||
sbi->managed_cache = NULL;
|
||||
#endif
|
||||
iput(sbi->packed_inode);
|
||||
sbi->packed_inode = NULL;
|
||||
#endif
|
||||
erofs_free_dev_context(sbi->devs);
|
||||
sbi->devs = NULL;
|
||||
erofs_fscache_unregister_fs(sb);
|
||||
|
@ -1060,7 +1084,7 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
id = huge_encode_dev(sb->s_bdev->bd_dev);
|
||||
|
||||
buf->f_type = sb->s_magic;
|
||||
buf->f_bsize = EROFS_BLKSIZ;
|
||||
buf->f_bsize = sb->s_blocksize;
|
||||
buf->f_blocks = sbi->total_blocks;
|
||||
buf->f_bfree = buf->f_bavail = 0;
|
||||
|
||||
|
|
 fs/erofs/xattr.c | 224
|
@@ -7,6 +7,19 @@
 #include <linux/security.h>
 #include "xattr.h"
 
+static inline erofs_blk_t erofs_xattr_blkaddr(struct super_block *sb,
+					      unsigned int xattr_id)
+{
+	return EROFS_SB(sb)->xattr_blkaddr +
+	       erofs_blknr(sb, xattr_id * sizeof(__u32));
+}
+
+static inline unsigned int erofs_xattr_blkoff(struct super_block *sb,
+					      unsigned int xattr_id)
+{
+	return erofs_blkoff(sb, xattr_id * sizeof(__u32));
+}
+
 struct xattr_iter {
 	struct super_block *sb;
 	struct erofs_buf buf;
@@ -16,7 +29,7 @@ struct xattr_iter {
 	unsigned int ofs;
 };
 
-static int init_inode_xattrs(struct inode *inode)
+static int erofs_init_inode_xattrs(struct inode *inode)
 {
 	struct erofs_inode *const vi = EROFS_I(inode);
 	struct xattr_iter it;
@@ -68,8 +81,8 @@ static int init_inode_xattrs(struct inode *inode)
 	}
 
 	it.buf = __EROFS_BUF_INITIALIZER;
-	it.blkaddr = erofs_blknr(erofs_iloc(inode) + vi->inode_isize);
-	it.ofs = erofs_blkoff(erofs_iloc(inode) + vi->inode_isize);
+	it.blkaddr = erofs_blknr(sb, erofs_iloc(inode) + vi->inode_isize);
+	it.ofs = erofs_blkoff(sb, erofs_iloc(inode) + vi->inode_isize);
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
 	it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
@@ -92,9 +105,9 @@ static int init_inode_xattrs(struct inode *inode)
 	it.ofs += sizeof(struct erofs_xattr_ibody_header);
 
 	for (i = 0; i < vi->xattr_shared_count; ++i) {
-		if (it.ofs >= EROFS_BLKSIZ) {
+		if (it.ofs >= sb->s_blocksize) {
 			/* cannot be unaligned */
-			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
+			DBG_BUGON(it.ofs != sb->s_blocksize);
 
 			it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
 						      EROFS_KMAP);
@@ -139,15 +152,15 @@ struct xattr_iter_handlers {
 
 static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-	if (it->ofs < EROFS_BLKSIZ)
+	if (it->ofs < it->sb->s_blocksize)
 		return 0;
 
-	it->blkaddr += erofs_blknr(it->ofs);
+	it->blkaddr += erofs_blknr(it->sb, it->ofs);
 	it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
 				       EROFS_KMAP);
 	if (IS_ERR(it->kaddr))
 		return PTR_ERR(it->kaddr);
-	it->ofs = erofs_blkoff(it->ofs);
+	it->ofs = erofs_blkoff(it->sb, it->ofs);
 	return 0;
 }
 
@@ -157,7 +170,8 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
 	struct erofs_inode *const vi = EROFS_I(inode);
 	unsigned int xattr_header_sz, inline_xattr_ofs;
 
-	xattr_header_sz = inlinexattr_header_size(inode);
+	xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
+			  sizeof(u32) * vi->xattr_shared_count;
 	if (xattr_header_sz >= vi->xattr_isize) {
 		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
 		return -ENOATTR;
@@ -165,8 +179,8 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
 
 	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
 
-	it->blkaddr = erofs_blknr(erofs_iloc(inode) + inline_xattr_ofs);
-	it->ofs = erofs_blkoff(erofs_iloc(inode) + inline_xattr_ofs);
+	it->blkaddr = erofs_blknr(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
+	it->ofs = erofs_blkoff(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
 	it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
 				       EROFS_KMAP);
 	if (IS_ERR(it->kaddr))
@@ -222,8 +236,8 @@ static int xattr_foreach(struct xattr_iter *it,
 	processed = 0;
 
 	while (processed < entry.e_name_len) {
-		if (it->ofs >= EROFS_BLKSIZ) {
-			DBG_BUGON(it->ofs > EROFS_BLKSIZ);
+		if (it->ofs >= it->sb->s_blocksize) {
+			DBG_BUGON(it->ofs > it->sb->s_blocksize);
 
 			err = xattr_iter_fixup(it);
 			if (err)
@@ -231,7 +245,7 @@ static int xattr_foreach(struct xattr_iter *it,
 			it->ofs = 0;
 		}
 
-		slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
+		slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
 			      entry.e_name_len - processed);
 
 		/* handle name */
@@ -257,8 +271,8 @@ static int xattr_foreach(struct xattr_iter *it,
 	}
 
 	while (processed < value_sz) {
-		if (it->ofs >= EROFS_BLKSIZ) {
-			DBG_BUGON(it->ofs > EROFS_BLKSIZ);
+		if (it->ofs >= it->sb->s_blocksize) {
+			DBG_BUGON(it->ofs > it->sb->s_blocksize);
 
 			err = xattr_iter_fixup(it);
 			if (err)
@@ -266,7 +280,7 @@ static int xattr_foreach(struct xattr_iter *it,
 			it->ofs = 0;
 		}
 
-		slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
+		slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
 			      value_sz - processed);
 		op->value(it, processed, it->kaddr + it->ofs, slice);
 		it->ofs += slice;
@@ -283,17 +297,45 @@ struct getxattr_iter {
 	struct xattr_iter it;
 
 	char *buffer;
-	int buffer_size, index;
+	int buffer_size, index, infix_len;
 	struct qstr name;
 };
 
+static int erofs_xattr_long_entrymatch(struct getxattr_iter *it,
+				       struct erofs_xattr_entry *entry)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(it->it.sb);
+	struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
+		(entry->e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
+
+	if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
+		return -ENOATTR;
+
+	if (it->index != pf->prefix->base_index ||
+	    it->name.len != entry->e_name_len + pf->infix_len)
+		return -ENOATTR;
+
+	if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
+		return -ENOATTR;
+
+	it->infix_len = pf->infix_len;
+	return 0;
+}
+
 static int xattr_entrymatch(struct xattr_iter *_it,
 			    struct erofs_xattr_entry *entry)
 {
 	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
 
-	return (it->index != entry->e_name_index ||
-		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
+	/* should also match the infix for long name prefixes */
+	if (entry->e_name_index & EROFS_XATTR_LONG_PREFIX)
+		return erofs_xattr_long_entrymatch(it, entry);
+
+	if (it->index != entry->e_name_index ||
+	    it->name.len != entry->e_name_len)
+		return -ENOATTR;
+	it->infix_len = 0;
+	return 0;
 }
 
 static int xattr_namematch(struct xattr_iter *_it,
@@ -301,7 +343,9 @@ static int xattr_namematch(struct xattr_iter *_it,
 {
 	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
 
-	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
+	if (memcmp(buf, it->name.name + it->infix_len + processed, len))
+		return -ENOATTR;
+	return 0;
 }
 
 static int xattr_checkbuffer(struct xattr_iter *_it,
@@ -351,21 +395,18 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
 {
 	struct erofs_inode *const vi = EROFS_I(inode);
-	struct super_block *const sb = inode->i_sb;
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	unsigned int i;
+	struct super_block *const sb = it->it.sb;
+	unsigned int i, xsid;
 	int ret = -ENOATTR;
 
 	for (i = 0; i < vi->xattr_shared_count; ++i) {
-		erofs_blk_t blkaddr =
-			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
-		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
-		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
-						  EROFS_KMAP);
+		xsid = vi->xattr_shared_xattrs[i];
+		it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
+		it->it.ofs = erofs_xattr_blkoff(sb, xsid);
+		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
+						  it->it.blkaddr, EROFS_KMAP);
 		if (IS_ERR(it->it.kaddr))
 			return PTR_ERR(it->it.kaddr);
-		it->it.blkaddr = blkaddr;
 
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
 		if (ret != -ENOATTR)
@@ -394,7 +435,7 @@ int erofs_getxattr(struct inode *inode, int index,
 	if (!name)
 		return -EINVAL;
 
-	ret = init_inode_xattrs(inode);
+	ret = erofs_init_inode_xattrs(inode);
 	if (ret)
 		return ret;
 
@@ -421,20 +462,9 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
 				   const char *name, void *buffer, size_t size)
 {
-	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-
-	switch (handler->flags) {
-	case EROFS_XATTR_INDEX_USER:
-		if (!test_opt(&sbi->opt, XATTR_USER))
-			return -EOPNOTSUPP;
-		break;
-	case EROFS_XATTR_INDEX_TRUSTED:
-		break;
-	case EROFS_XATTR_INDEX_SECURITY:
-		break;
-	default:
-		return -EINVAL;
-	}
+	if (handler->flags == EROFS_XATTR_INDEX_USER &&
+	    !test_opt(&EROFS_I_SB(inode)->opt, XATTR_USER))
+		return -EOPNOTSUPP;
 
 	return erofs_getxattr(inode, handler->flags, name, buffer, size);
 }
@@ -483,25 +513,40 @@ static int xattr_entrylist(struct xattr_iter *_it,
 {
 	struct listxattr_iter *it =
 		container_of(_it, struct listxattr_iter, it);
-	unsigned int prefix_len;
-	const char *prefix;
+	unsigned int base_index = entry->e_name_index;
+	unsigned int prefix_len, infix_len = 0;
+	const char *prefix, *infix = NULL;
 
-	prefix = erofs_xattr_prefix(entry->e_name_index, it->dentry);
+	if (entry->e_name_index & EROFS_XATTR_LONG_PREFIX) {
+		struct erofs_sb_info *sbi = EROFS_SB(_it->sb);
+		struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
+			(entry->e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
+
+		if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
+			return 1;
+		infix = pf->prefix->infix;
+		infix_len = pf->infix_len;
+		base_index = pf->prefix->base_index;
+	}
+
+	prefix = erofs_xattr_prefix(base_index, it->dentry);
 	if (!prefix)
 		return 1;
 	prefix_len = strlen(prefix);
 
 	if (!it->buffer) {
-		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
+		it->buffer_ofs += prefix_len + infix_len +
+					entry->e_name_len + 1;
 		return 1;
 	}
 
-	if (it->buffer_ofs + prefix_len
+	if (it->buffer_ofs + prefix_len + infix_len +
 	    + entry->e_name_len + 1 > it->buffer_size)
 		return -ERANGE;
 
 	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
-	it->buffer_ofs += prefix_len;
+	memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
+	it->buffer_ofs += prefix_len + infix_len;
 	return 0;
 }
 
@@ -555,21 +600,18 @@ static int shared_listxattr(struct listxattr_iter *it)
 {
 	struct inode *const inode = d_inode(it->dentry);
 	struct erofs_inode *const vi = EROFS_I(inode);
-	struct super_block *const sb = inode->i_sb;
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	unsigned int i;
+	struct super_block *const sb = it->it.sb;
+	unsigned int i, xsid;
 	int ret = 0;
 
 	for (i = 0; i < vi->xattr_shared_count; ++i) {
-		erofs_blk_t blkaddr =
-			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
-		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
-		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
-						  EROFS_KMAP);
+		xsid = vi->xattr_shared_xattrs[i];
+		it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
+		it->it.ofs = erofs_xattr_blkoff(sb, xsid);
+		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
+						  it->it.blkaddr, EROFS_KMAP);
 		if (IS_ERR(it->it.kaddr))
 			return PTR_ERR(it->it.kaddr);
-		it->it.blkaddr = blkaddr;
 
 		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
 		if (ret)
@@ -584,7 +626,7 @@ ssize_t erofs_listxattr(struct dentry *dentry,
 	int ret;
 	struct listxattr_iter it;
 
-	ret = init_inode_xattrs(d_inode(dentry));
+	ret = erofs_init_inode_xattrs(d_inode(dentry));
 	if (ret == -ENOATTR)
 		return 0;
 	if (ret)
@@ -605,6 +647,62 @@ ssize_t erofs_listxattr(struct dentry *dentry,
 	return ret;
 }
 
+void erofs_xattr_prefixes_cleanup(struct super_block *sb)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	int i;
+
+	if (sbi->xattr_prefixes) {
+		for (i = 0; i < sbi->xattr_prefix_count; i++)
+			kfree(sbi->xattr_prefixes[i].prefix);
+		kfree(sbi->xattr_prefixes);
+		sbi->xattr_prefixes = NULL;
+	}
+}
+
+int erofs_xattr_prefixes_init(struct super_block *sb)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+	erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
+	struct erofs_xattr_prefix_item *pfs;
+	int ret = 0, i, len;
+
+	if (!sbi->xattr_prefix_count)
+		return 0;
+
+	pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
+	if (!pfs)
+		return -ENOMEM;
+
+	if (erofs_sb_has_fragments(sbi))
+		buf.inode = sbi->packed_inode;
+	else
+		erofs_init_metabuf(&buf, sb);
+
+	for (i = 0; i < sbi->xattr_prefix_count; i++) {
+		void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);
+
+		if (IS_ERR(ptr)) {
+			ret = PTR_ERR(ptr);
+			break;
+		} else if (len < sizeof(*pfs->prefix) ||
+			   len > EROFS_NAME_LEN + sizeof(*pfs->prefix)) {
+			kfree(ptr);
+			ret = -EFSCORRUPTED;
+			break;
+		}
+		pfs[i].prefix = ptr;
+		pfs[i].infix_len = len - sizeof(struct erofs_xattr_long_prefix);
+	}
+
+	erofs_put_metabuf(&buf);
+	sbi->xattr_prefixes = pfs;
+	if (ret)
+		erofs_xattr_prefixes_cleanup(sb);
+	return ret;
+}
+
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
 struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
 {
fs/erofs/xattr.h
@@ -13,29 +13,6 @@
 /* Attribute not found */
 #define ENOATTR         ENODATA
 
-static inline unsigned int inlinexattr_header_size(struct inode *inode)
-{
-	return sizeof(struct erofs_xattr_ibody_header) +
-	       sizeof(u32) * EROFS_I(inode)->xattr_shared_count;
-}
-
-static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
-					  unsigned int xattr_id)
-{
-#ifdef CONFIG_EROFS_FS_XATTR
-	return sbi->xattr_blkaddr +
-	       xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
-#else
-	return 0;
-#endif
-}
-
-static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
-					     unsigned int xattr_id)
-{
-	return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
-}
-
 #ifdef CONFIG_EROFS_FS_XATTR
 extern const struct xattr_handler erofs_xattr_user_handler;
 extern const struct xattr_handler erofs_xattr_trusted_handler;
@@ -69,9 +46,13 @@ static inline const char *erofs_xattr_prefix(unsigned int idx,
 
 extern const struct xattr_handler *erofs_xattr_handlers[];
 
+int erofs_xattr_prefixes_init(struct super_block *sb);
+void erofs_xattr_prefixes_cleanup(struct super_block *sb);
 int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
 ssize_t erofs_listxattr(struct dentry *, char *, size_t);
 #else
+static inline int erofs_xattr_prefixes_init(struct super_block *sb) { return 0; }
+static inline void erofs_xattr_prefixes_cleanup(struct super_block *sb) {}
 static inline int erofs_getxattr(struct inode *inode, int index,
 				 const char *name, void *buffer,
 				 size_t buffer_size)
fs/erofs/zdata.c
@@ -807,7 +807,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
-		pcl->pageofs_in = erofs_blkoff(map->m_pa);
+		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
 		pcl->tailpacking_size = map->m_plen;
 	} else {
 		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
@@ -930,6 +930,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
 				 struct page *page, unsigned int pageofs,
 				 unsigned int len)
 {
+	struct super_block *sb = inode->i_sb;
 	struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 	u8 *src, *dst;
@@ -938,19 +939,19 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
 	if (!packed_inode)
 		return -EFSCORRUPTED;
 
+	buf.inode = packed_inode;
 	pos += EROFS_I(inode)->z_fragmentoff;
 	for (i = 0; i < len; i += cnt) {
 		cnt = min_t(unsigned int, len - i,
-			    EROFS_BLKSIZ - erofs_blkoff(pos));
-		src = erofs_bread(&buf, packed_inode,
-				  erofs_blknr(pos), EROFS_KMAP);
+			    sb->s_blocksize - erofs_blkoff(sb, pos));
+		src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
 		if (IS_ERR(src)) {
 			erofs_put_metabuf(&buf);
 			return PTR_ERR(src);
 		}
 
 		dst = kmap_local_page(page);
-		memcpy(dst + pageofs + i, src + erofs_blkoff(pos), cnt);
+		memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt);
 		kunmap_local(dst);
 		pos += cnt;
 	}
@@ -978,8 +979,6 @@ repeat:
 
 	if (offset + cur < map->m_la ||
 	    offset + cur >= map->m_la + map->m_llen) {
-		erofs_dbg("out-of-range map @ pos %llu", offset + cur);
-
 		if (z_erofs_collector_end(fe))
 			fe->backmost = false;
 		map->m_la = offset + cur;
@@ -1005,7 +1004,8 @@ repeat:
 		void *mp;
 
 		mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
-					erofs_blknr(map->m_pa), EROFS_NO_KMAP);
+					erofs_blknr(inode->i_sb, map->m_pa),
+					EROFS_NO_KMAP);
 		if (IS_ERR(mp)) {
 			err = PTR_ERR(mp);
 			erofs_err(inode->i_sb,
@@ -1103,9 +1103,6 @@ out:
 	if (err)
 		z_erofs_page_mark_eio(page);
 	z_erofs_onlinepage_endio(page);
-
-	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
-		  __func__, page, spiltted, map->m_llen);
 	return err;
 }
 
@@ -1726,11 +1723,11 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 
 		/* no device id here, thus it will always succeed */
 		mdev = (struct erofs_map_dev) {
-			.m_pa = blknr_to_addr(pcl->obj.index),
+			.m_pa = erofs_pos(sb, pcl->obj.index),
 		};
 		(void)erofs_map_dev(sb, &mdev);
 
-		cur = erofs_blknr(mdev.m_pa);
+		cur = erofs_blknr(sb, mdev.m_pa);
 		end = cur + pcl->pclusterpages;
 
 		do {
@@ -1764,7 +1761,7 @@ submit_bio_retry:
 
 				last_bdev = mdev.m_bdev;
 				bio->bi_iter.bi_sector = (sector_t)cur <<
-					LOG_SECTORS_PER_BLOCK;
+					(sb->s_blocksize_bits - 9);
 				bio->bi_private = q[JQ_SUBMIT];
 				if (f->readahead)
 					bio->bi_opf |= REQ_RAHEAD;
fs/erofs/zmap.c (166 changed lines)
@@ -7,24 +7,6 @@
 #include <asm/unaligned.h>
 #include <trace/events/erofs.h>
 
-int z_erofs_fill_inode(struct inode *inode)
-{
-	struct erofs_inode *const vi = EROFS_I(inode);
-	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
-	if (!erofs_sb_has_big_pcluster(sbi) &&
-	    !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) &&
-	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
-		vi->z_advise = 0;
-		vi->z_algorithmtype[0] = 0;
-		vi->z_algorithmtype[1] = 0;
-		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
-		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
-	}
-	inode->i_mapping->a_ops = &z_erofs_aops;
-	return 0;
-}
-
 struct z_erofs_maprecorder {
 	struct inode *inode;
 	struct erofs_map_blocks *map;
@@ -45,47 +27,50 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 {
 	struct inode *const inode = m->inode;
 	struct erofs_inode *const vi = EROFS_I(inode);
-	const erofs_off_t pos =
-		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(erofs_iloc(inode) +
-				vi->inode_isize + vi->xattr_isize) +
-		lcn * sizeof(struct z_erofs_vle_decompressed_index);
-	struct z_erofs_vle_decompressed_index *di;
+	const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
+			vi->inode_isize + vi->xattr_isize) +
+			lcn * sizeof(struct z_erofs_lcluster_index);
+	struct z_erofs_lcluster_index *di;
 	unsigned int advise, type;
 
 	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
-				      erofs_blknr(pos), EROFS_KMAP);
+				      erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
 	if (IS_ERR(m->kaddr))
 		return PTR_ERR(m->kaddr);
 
-	m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
+	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
 	m->lcn = lcn;
-	di = m->kaddr + erofs_blkoff(pos);
+	di = m->kaddr + erofs_blkoff(inode->i_sb, pos);
 
 	advise = le16_to_cpu(di->di_advise);
-	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
-		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
+	type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
+		((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
 	switch (type) {
-	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
 		m->clusterofs = 1 << vi->z_logical_clusterbits;
 		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
-		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
 			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
 					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
 				DBG_BUGON(1);
 				return -EFSCORRUPTED;
 			}
 			m->compressedblks = m->delta[0] &
-				~Z_EROFS_VLE_DI_D0_CBLKCNT;
+				~Z_EROFS_LI_D0_CBLKCNT;
 			m->delta[0] = 1;
 		}
 		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
 		break;
-	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
-	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
-	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
-		if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
+	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
+		if (advise & Z_EROFS_LI_PARTIAL_REF)
 			m->partialref = true;
 		m->clusterofs = le16_to_cpu(di->di_clusterofs);
+		if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
+			DBG_BUGON(1);
+			return -EFSCORRUPTED;
+		}
 		m->pblk = le32_to_cpu(di->di_u.blkaddr);
 		break;
 	default:
@@ -121,13 +106,13 @@ static int get_compacted_la_distance(unsigned int lclusterbits,
 		lo = decode_compactedbits(lclusterbits, lomask,
 					  in, encodebits * i, &type);
 
-		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 			return d1;
 		++d1;
 	} while (++i < vcnt);
 
-	/* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */
-	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
+	/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
+	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
 		d1 += lo - 1;
 	return d1;
 }
@@ -156,7 +141,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 			(vcnt << amortizedshift);
 	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
 	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
-	eofs = erofs_blkoff(pos);
+	eofs = erofs_blkoff(m->inode->i_sb, pos);
 	base = round_down(eofs, vcnt << amortizedshift);
 	in = m->kaddr + base;
 
@@ -165,19 +150,19 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 	lo = decode_compactedbits(lclusterbits, lomask,
 				  in, encodebits * i, &type);
 	m->type = type;
-	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
+	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
 		m->clusterofs = 1 << lclusterbits;
 
 		/* figure out lookahead_distance: delta[1] if needed */
 		if (lookahead)
 			m->delta[1] = get_compacted_la_distance(lclusterbits,
 						encodebits, vcnt, in, i);
-		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
 			if (!big_pcluster) {
 				DBG_BUGON(1);
 				return -EFSCORRUPTED;
 			}
-			m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
 			m->delta[0] = 1;
 			return 0;
 		} else if (i + 1 != (int)vcnt) {
@@ -191,9 +176,9 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 		 */
 		lo = decode_compactedbits(lclusterbits, lomask,
 					  in, encodebits * (i - 1), &type);
-		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 			lo = 0;
-		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
+		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
 			lo = 1;
 		m->delta[0] = lo + 1;
 		return 0;
@@ -207,7 +192,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 			--i;
 			lo = decode_compactedbits(lclusterbits, lomask,
 						  in, encodebits * i, &type);
-			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 				i -= lo;
 
 			if (i >= 0)
@@ -219,10 +204,10 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 			--i;
 			lo = decode_compactedbits(lclusterbits, lomask,
 						  in, encodebits * i, &type);
-			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
-				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
 					--i;
-					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
 					continue;
 				}
 				/* bigpcluster shouldn't have plain d0 == 1 */
@@ -249,7 +234,7 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
 	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
 		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
-	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+	unsigned int totalidx = erofs_iblks(inode);
 	unsigned int compacted_4b_initial, compacted_2b;
 	unsigned int amortizedshift;
 	erofs_off_t pos;
@@ -290,7 +275,7 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 out:
 	pos += lcn * (1 << amortizedshift);
 	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
-				      erofs_blknr(pos), EROFS_KMAP);
+				      erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
 	if (IS_ERR(m->kaddr))
 		return PTR_ERR(m->kaddr);
 	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
@@ -301,10 +286,10 @@ static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 {
 	const unsigned int datamode = EROFS_I(m->inode)->datalayout;
 
-	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
+	if (datamode == EROFS_INODE_COMPRESSED_FULL)
 		return legacy_load_cluster_from_disk(m, lcn);
 
-	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
+	if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
 		return compacted_load_cluster_from_disk(m, lcn, lookahead);
 
 	return -EINVAL;
@@ -326,7 +311,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
 			return err;
 
 		switch (m->type) {
-		case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+		case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
 			if (!m->delta[0]) {
 				erofs_err(m->inode->i_sb,
 					  "invalid lookback distance 0 @ nid %llu",
@@ -336,9 +321,9 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
 			}
 			lookback_distance = m->delta[0];
 			continue;
-		case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
-		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
-		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
+		case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+		case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+		case Z_EROFS_LCLUSTER_TYPE_HEAD2:
 			m->headtype = m->type;
 			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
 			return 0;
@@ -360,21 +345,22 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
 static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
 					    unsigned int initial_lcn)
 {
+	struct super_block *sb = m->inode->i_sb;
 	struct erofs_inode *const vi = EROFS_I(m->inode);
 	struct erofs_map_blocks *const map = m->map;
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
 	unsigned long lcn;
 	int err;
 
-	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
-		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
-		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
+	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
+		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
+		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
 	DBG_BUGON(m->type != m->headtype);
 
-	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
-	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
+	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
 	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
-	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
+	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
 	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
 		map->m_plen = 1ULL << lclusterbits;
 		return 0;
@@ -396,19 +382,19 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
 	 * BUG_ON in the debugging mode only for developers to notice that.
 	 */
 	DBG_BUGON(lcn == initial_lcn &&
-		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
+		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
 
 	switch (m->type) {
-	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
-	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
-	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
+	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
 		/*
 		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
 		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
 		 */
-		m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
+		m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
 		break;
-	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
 		if (m->delta[0] != 1)
 			goto err_bonus_cblkcnt;
 		if (m->compressedblks)
@@ -422,7 +408,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
 		return -EFSCORRUPTED;
 	}
 out:
-	map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
+	map->m_plen = erofs_pos(sb, m->compressedblks);
 	return 0;
 err_bonus_cblkcnt:
 	erofs_err(m->inode->i_sb,
@@ -452,12 +438,12 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
 		if (err)
 			return err;
 
-		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
+		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
 			DBG_BUGON(!m->delta[1] &&
 				  m->clusterofs != 1 << lclusterbits);
-		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
-			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
-			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
+		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
+			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
 			/* go on until the next HEAD lcluster */
 			if (lcn != headlcn)
 				break;
@@ -476,8 +462,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
 }
 
 static int z_erofs_do_map_blocks(struct inode *inode,
-				 struct erofs_map_blocks *map,
-				 int flags)
+				 struct erofs_map_blocks *map, int flags)
 {
 	struct erofs_inode *const vi = EROFS_I(inode);
 	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
@@ -507,9 +492,9 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 	end = (m.lcn + 1ULL) << lclusterbits;
 
 	switch (m.type) {
-	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
-	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
-	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
+	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
 		if (endoff >= m.clusterofs) {
 			m.headtype = m.type;
 			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
@@ -534,7 +519,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 		map->m_flags |= EROFS_MAP_FULL_MAPPED;
 		m.delta[0] = 1;
 		fallthrough;
-	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
 		/* get the corresponding first chunk */
 		err = z_erofs_extent_lookback(&m, m.delta[0]);
 		if (err)
@@ -555,7 +540,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 			vi->z_tailextent_headlcn = m.lcn;
 			/* for non-compact indexes, fragmentoff is 64 bits */
 			if (fragment &&
-			    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
+			    vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
 				vi->z_fragmentoff |= (u64)m.pblk << 32;
 		}
 		if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
@@ -565,13 +550,13 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
 		map->m_flags |= EROFS_MAP_FRAGMENT;
 	} else {
-		map->m_pa = blknr_to_addr(m.pblk);
+		map->m_pa = erofs_pos(inode->i_sb, m.pblk);
 		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
 		if (err)
 			goto unmap_out;
 	}
 
-	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) {
+	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
 		if (map->m_llen > map->m_plen) {
 			DBG_BUGON(1);
 			err = -EFSCORRUPTED;
@@ -583,7 +568,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 		else
 			map->m_algorithmformat =
 				Z_EROFS_COMPRESSION_SHIFTED;
-	} else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
+	} else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
 		map->m_algorithmformat = vi->z_algorithmtype[1];
 	} else {
 		map->m_algorithmformat = vi->z_algorithmtype[0];
@@ -592,7 +577,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
 	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
 	      map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
-	      map->m_llen >= EROFS_BLKSIZ)) {
+	      map->m_llen >= i_blocksize(inode))) {
 		err = z_erofs_get_extent_decompressedlen(&m);
 		if (!err)
 			map->m_flags |= EROFS_MAP_FULL_MAPPED;
@@ -600,9 +585,6 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 
 unmap_out:
 	erofs_unmap_metabuf(&m.map->buf);
-	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
-		  __func__, map->m_la, map->m_pa,
-		  map->m_llen, map->m_plen, map->m_flags);
 	return err;
 }
 
@@ -633,13 +615,13 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		goto out_unlock;
 
 	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
-	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
+	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
 	if (IS_ERR(kaddr)) {
 		err = PTR_ERR(kaddr);
 		goto out_unlock;
 	}
 
-	h = kaddr + erofs_blkoff(pos);
+	h = kaddr + erofs_blkoff(sb, pos);
 	/*
 	 * if the highest bit of the 8-byte map header is set, the whole file
 	 * is stored in the packed inode. The rest bits keeps z_fragmentoff.
@@ -663,7 +645,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		goto out_put_metabuf;
 	}
 
-	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
+	vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
 	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
 	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
 			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
@@ -672,7 +654,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		err = -EFSCORRUPTED;
 		goto out_put_metabuf;
 	}
-	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
+	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
 	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
 	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
 		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
@@ -692,7 +674,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		erofs_put_metabuf(&map.buf);
 
 		if (!map.m_plen ||
-		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
+		    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
 			erofs_err(sb, "invalid tail-packing pclustersize %llu",
 				  map.m_plen);
 			err = -EFSCORRUPTED;
include/trace/events/erofs.h
@@ -71,8 +71,8 @@ TRACE_EVENT(erofs_fill_inode,
 	TP_fast_assign(
 		__entry->dev		= inode->i_sb->s_dev;
 		__entry->nid		= EROFS_I(inode)->nid;
-		__entry->blkaddr	= erofs_blknr(erofs_iloc(inode));
-		__entry->ofs		= erofs_blkoff(erofs_iloc(inode));
+		__entry->blkaddr	= erofs_blknr(inode->i_sb, erofs_iloc(inode));
+		__entry->ofs		= erofs_blkoff(inode->i_sb, erofs_iloc(inode));
 	),
 
 	TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",