staging: erofs: compressed_pages should not be accessed again after freed
This patch resolves the following page use-after-free issue,
z_erofs_vle_unzip:
    ...
    for (i = 0; i < nr_pages; ++i) {
        ...
        z_erofs_onlinepage_endio(page);                           (1)
    }

    for (i = 0; i < clusterpages; ++i) {
        page = compressed_pages[i];

        if (page->mapping == mngda)                               (2)
            continue;

        /* recycle all individual staging pages */
        (void)z_erofs_gather_if_stagingpage(page_pool, page);     (3)

        WRITE_ONCE(compressed_pages[i], NULL);
    }
    ...
After (1) is executed, the page is freed and can be reused right away.
If compressed_pages is scanned after that, a reused page can mistakenly
fall into (2) or (3), and things finally end up in a mess.
This patch solves the above issue with as few changes as possible
in order to make the fix easier to backport.
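With this change the teardown in z_erofs_vle_unzip effectively runs in
the opposite order; roughly (a simplified sketch of the fixed flow, not
the exact resulting code):

    /* (a) release/recycle all compressed_pages first, while the
     *     decompressed pages are still owned by this context */
    for (i = 0; i < clusterpages; ++i) {
        page = compressed_pages[i];

        if (page->mapping == mngda)
            continue;

        /* recycle all individual staging pages */
        (void)z_erofs_gather_if_stagingpage(page_pool, page);
        WRITE_ONCE(compressed_pages[i], NULL);
    }

    /* (b) only then end the online pages; after this point they
     *     may be freed and reused at any time */
    for (i = 0; i < nr_pages; ++i) {
        ...
        z_erofs_onlinepage_endio(page);
    }

so a page that gets reused after endio can no longer be mistaken for a
managed or staging compressed page.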
Fixes: 3883a79abd ("staging: erofs: introduce VLE decompression support")
Cc: <stable@vger.kernel.org> # 4.19+
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -986,11 +986,10 @@ repeat:
     if (llen > grp->llen)
         llen = grp->llen;

-    err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-        clusterpages, pages, llen, work->pageofs,
-        z_erofs_onlinepage_endio);
+    err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+                                        pages, llen, work->pageofs);
     if (err != -ENOTSUPP)
-        goto out_percpu;
+        goto out;

     if (sparsemem_pages >= nr_pages)
         goto skip_allocpage;
@@ -1011,21 +1010,7 @@ skip_allocpage:
     erofs_vunmap(vout, nr_pages);

 out:
-    for (i = 0; i < nr_pages; ++i) {
-        page = pages[i];
-        DBG_BUGON(!page->mapping);
-
-        /* recycle all individual staging pages */
-        if (z_erofs_gather_if_stagingpage(page_pool, page))
-            continue;
-
-        if (unlikely(err < 0))
-            SetPageError(page);
-
-        z_erofs_onlinepage_endio(page);
-    }
-
-out_percpu:
+    /* must handle all compressed pages before endding pages */
     for (i = 0; i < clusterpages; ++i) {
         page = compressed_pages[i];

@@ -1039,6 +1024,23 @@ out_percpu:
         WRITE_ONCE(compressed_pages[i], NULL);
     }

+    for (i = 0; i < nr_pages; ++i) {
+        page = pages[i];
+        if (!page)
+            continue;
+
+        DBG_BUGON(!page->mapping);
+
+        /* recycle all individual staging pages */
+        if (z_erofs_gather_if_stagingpage(page_pool, page))
+            continue;
+
+        if (unlikely(err < 0))
+            SetPageError(page);
+
+        z_erofs_onlinepage_endio(page);
+    }
+
     if (pages == z_pagemap_global)
         mutex_unlock(&z_pagemap_global_lock);
     else if (unlikely(pages != pages_onstack))
@@ -218,8 +218,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
 int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                                   unsigned int clusterpages,
                                   struct page **pages, unsigned int outlen,
-                                  unsigned short pageofs,
-                                  void (*endio)(struct page *));
+                                  unsigned short pageofs);
 int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
                            unsigned int clusterpages,
                            void *vaddr, unsigned int llen,
@@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                                   unsigned int clusterpages,
                                   struct page **pages,
                                   unsigned int outlen,
-                                  unsigned short pageofs,
-                                  void (*endio)(struct page *))
+                                  unsigned short pageofs)
 {
     void *vin, *vout;
     unsigned int nr_pages, i, j;
@@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
     ret = z_erofs_unzip_lz4(vin, vout + pageofs,
                             clusterpages * PAGE_SIZE, outlen);

-    if (ret >= 0) {
-        outlen = ret;
-        ret = 0;
-    }
+    if (ret < 0)
+        goto out;
+    ret = 0;

     for (i = 0; i < nr_pages; ++i) {
         j = min((unsigned int)PAGE_SIZE - pageofs, outlen);

         if (pages[i]) {
-            if (ret < 0) {
-                SetPageError(pages[i]);
-            } else if (clusterpages == 1 &&
-                       pages[i] == compressed_pages[0]) {
+            if (clusterpages == 1 &&
+                pages[i] == compressed_pages[0]) {
                 memcpy(vin + pageofs, vout + pageofs, j);
             } else {
                 void *dst = kmap_atomic(pages[i]);
@@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                 memcpy(dst + pageofs, vout + pageofs, j);
                 kunmap_atomic(dst);
             }
-            endio(pages[i]);
         }
         vout += PAGE_SIZE;
         outlen -= j;
         pageofs = 0;
     }

+out:
     preempt_enable();

     if (clusterpages == 1)