btrfs: zero out left over bytes after processing compression streams

Don Bailey noticed that our page zeroing for compression at end-io time
isn't complete. This reworks a patch from Linus to push the zeroing
into the zlib and lzo specific functions instead of trying to handle
the corners inside btrfs_decompress_buf2page.

Signed-off-by: Chris Mason <clm@fb.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reported-by: Don A. Bailey <donb@securitymouse.com>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2f19cad94c
parent 7a5a4f9787
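Roughly, the fix makes each single-page decompression path clamp its destination length to one page and zero whatever the decompressor did not fill. The userspace sketch below is not kernel code: copy_and_zero_tail and EXAMPLE_PAGE_SIZE are made-up names used only to illustrate the same clamp-then-memset pattern the patch adds.

#include <stddef.h>
#include <string.h>

#define EXAMPLE_PAGE_SIZE 4096UL	/* stand-in for the kernel's PAGE_SIZE */

/*
 * Hypothetical helper: copy out_len decompressed bytes into a page-sized
 * destination and zero the remainder, mirroring the tail memset this
 * patch adds to the lzo/zlib single-page paths.
 */
static void copy_and_zero_tail(char *dest_page, size_t destlen,
			       const char *decompressed, size_t out_len)
{
	size_t bytes;

	/* the caller may pass a larger destlen; clamp it to one page */
	if (destlen > EXAMPLE_PAGE_SIZE)
		destlen = EXAMPLE_PAGE_SIZE;

	/* copy only what the decompressor actually produced */
	bytes = out_len < destlen ? out_len : destlen;
	memcpy(dest_page, decompressed, bytes);

	/* anything the stream did not cover must not leak stale bytes */
	if (bytes < destlen)
		memset(dest_page + bytes, 0, destlen - bytes);
}

The point of keeping the zeroing right next to the memcpy, rather than in a shared helper, is that stale page contents must never reach user space even when the decompressor returns fewer bytes than expected.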
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
 
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
 	return 1;
 }
+
+/*
+ * When uncompressing data, we need to make sure and zero any parts of
+ * the biovec that were not filled in by the decompression code.  pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in.  This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+				   unsigned long pg_index,
+				   unsigned long pg_offset)
+{
+	while (pg_index < vcnt) {
+		struct page *page = bvec[pg_index].bv_page;
+		unsigned long off = bvec[pg_index].bv_offset;
+		unsigned long len = bvec[pg_index].bv_len;
+
+		if (pg_offset < off)
+			pg_offset = off;
+		if (pg_offset < off + len) {
+			unsigned long bytes = off + len - pg_offset;
+			char *kaddr;
+
+			kaddr = kmap_atomic(page);
+			memset(kaddr + pg_offset, 0, bytes);
+			kunmap_atomic(kaddr);
+		}
+		pg_index++;
+		pg_offset = 0;
+	}
+}
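For context, a minimal userspace model of the loop above behaves as follows; struct toy_vec and clear_vec_end are invented stand-ins for struct bio_vec and kmap_atomic, not kernel API.

#include <stdio.h>
#include <string.h>

/* invented stand-in for struct bio_vec; buf plays the role of the mapped page */
struct toy_vec {
	char *buf;
	unsigned long offset;	/* bv_offset */
	unsigned long len;	/* bv_len */
};

/* same control flow as btrfs_clear_biovec_end, minus kmap/kunmap */
static void clear_vec_end(struct toy_vec *vec, int vcnt,
			  unsigned long idx, unsigned long off)
{
	while (idx < (unsigned long)vcnt) {
		unsigned long seg_off = vec[idx].offset;
		unsigned long seg_len = vec[idx].len;

		if (off < seg_off)
			off = seg_off;
		if (off < seg_off + seg_len)
			memset(vec[idx].buf + off, 0, seg_off + seg_len - off);
		idx++;
		off = 0;	/* every later segment is zeroed in full */
	}
}

int main(void)
{
	char a[16], b[16];
	struct toy_vec vec[2] = { { a, 0, sizeof(a) }, { b, 4, 8 } };

	memset(a, 'A', sizeof(a));
	memset(b, 'B', sizeof(b));

	/* pretend decompression stopped after byte 10 of segment 0 */
	clear_vec_end(vec, 2, 0, 10);

	/* a[10..15] and b[4..11] are zero; bytes outside bv_len stay untouched */
	printf("a[9]=%d a[10]=%d b[3]=%d b[4]=%d\n", a[9], a[10], b[3], b[4]);
	return 0;
}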
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			      unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+				   unsigned long pg_index,
+				   unsigned long pg_offset);
 struct btrfs_compress_op {
 	struct list_head *(*alloc_workspace)(void);
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@ cont:
 	}
 done:
 	kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
 
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 		goto out;
 	}
 
+	/*
+	 * the caller is already checking against PAGE_SIZE, but lets
+	 * move this check closer to the memcpy/memset
+	 */
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
 	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+	/*
+	 * btrfs_getblock is doing a zero on the tail of the page too,
+	 * but this will cover anything missing from the decompressed
+	 * data.
+	 */
+	if (bytes < destlen)
+		memset(kaddr+bytes, 0, destlen-bytes);
 	kunmap_atomic(kaddr);
 out:
 	return ret;
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@ done:
 	zlib_inflateEnd(&workspace->strm);
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
 
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	unsigned long bytes_left = destlen;
+	unsigned long bytes_left;
 	unsigned long total_out = 0;
+	unsigned long pg_offset = 0;
 	char *kaddr;
 
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+	bytes_left = destlen;
+
 	workspace->strm.next_in = data_in;
 	workspace->strm.avail_in = srclen;
 	workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		unsigned long buf_start;
 		unsigned long buf_offset;
 		unsigned long bytes;
-		unsigned long pg_offset = 0;
 
 		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
 		if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
 		ret = 0;
 
 	zlib_inflateEnd(&workspace->strm);
+
+	/*
+	 * this should only happen if zlib returned fewer bytes than we
+	 * expected.  btrfs_get_block is responsible for zeroing from the
+	 * end of the inline extent (destlen) to the end of the page
+	 */
+	if (pg_offset < destlen) {
+		kaddr = kmap_atomic(dest_page);
+		memset(kaddr + pg_offset, 0, destlen - pg_offset);
+		kunmap_atomic(kaddr);
+	}
 	return ret;
 }
 
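The same tail-zeroing step can be reproduced against userspace zlib. The sketch below is only an analogy for the pg_offset < destlen check above: inflate_one_page is a made-up name, error handling is simplified, and it targets the zlib library API, not the kernel's zlib_inflate wrappers. Link with -lz.

#include <string.h>
#include <zlib.h>

/*
 * Hypothetical example: inflate a compressed buffer into a single
 * page-sized destination and zero whatever zlib did not produce.
 */
static int inflate_one_page(const unsigned char *src, unsigned int srclen,
			    unsigned char *dest, unsigned int destlen)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit2(&strm, MAX_WBITS) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = srclen;
	strm.next_out = dest;
	strm.avail_out = destlen;

	ret = inflate(&strm, Z_FINISH);
	if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) {
		inflateEnd(&strm);
		return -1;
	}

	/* zlib may stop short of destlen; never hand back stale bytes */
	if (strm.total_out < destlen)
		memset(dest + strm.total_out, 0, destlen - strm.total_out);

	inflateEnd(&strm);
	return 0;
}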