author    Gao Xiang <gaoxiang25@huawei.com>    2019-02-27 13:33:30 +0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-02-27 15:41:57 +0100
commit    af692e117cb8cd9d3d844d413095775abc1217f9
staging: erofs: compressed_pages should not be accessed again after freed
This patch resolves the following page use-after-free issue:

z_erofs_vle_unzip:
    ...
    for (i = 0; i < nr_pages; ++i) {
        ...
        z_erofs_onlinepage_endio(page);  (1)
    }

    for (i = 0; i < clusterpages; ++i) {
        page = compressed_pages[i];

        if (page->mapping == mngda)      (2)
            continue;

        /* recycle all individual staging pages */
        (void)z_erofs_gather_if_stagingpage(page_pool, page);  (3)

        WRITE_ONCE(compressed_pages[i], NULL);
    }
    ...

After (1) is executed, the page is freed and can immediately be reused. If compressed_pages is scanned after that, it can mistakenly fall into (2) or (3) on a recycled page, and things end up in a mess.

This patch solves the above issue with as few changes as possible, in order to make the fix easier to backport.

Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
Cc: <stable@vger.kernel.org> # 4.19+
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
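[Editorial note] The heart of the fix is an ordering rule rather than new logic: every array that may still reference a page must be walked before z_erofs_onlinepage_endio() can hand the page back for reuse. The minimal userspace C sketch below illustrates the same hazard and the fixed ordering; all names in it (page_stub, end_page, and so on) are hypothetical stand-ins, not erofs or kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for struct page */
struct page_stub {
	int is_staging;		/* plays the role of the page->mapping checks */
};

/* stand-in for z_erofs_onlinepage_endio(): may free the page */
static void end_page(struct page_stub *p)
{
	free(p);		/* from here on, *p must never be read */
}

int main(void)
{
	struct page_stub *pages[2];
	struct page_stub *compressed_pages[1];
	int i;

	pages[0] = calloc(1, sizeof(*pages[0]));
	pages[1] = calloc(1, sizeof(*pages[1]));
	pages[1]->is_staging = 1;

	/* a staging page is referenced by both arrays at once */
	compressed_pages[0] = pages[1];

	/*
	 * Fixed ordering: drop the shared compressed_pages references
	 * first, while every page is guaranteed to be alive...
	 */
	for (i = 0; i < 1; ++i) {
		if (compressed_pages[i]->is_staging) {
			/* recycle the staging page here */
		}
		compressed_pages[i] = NULL;
	}

	/* ...and only then end (and possibly free) the pages */
	for (i = 0; i < 2; ++i)
		end_page(pages[i]);

	/* the buggy order would read compressed_pages[0] after free() */
	puts("compressed pages handled before ending pages: no UAF");
	return 0;
}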
Diffstat:
 drivers/staging/erofs/unzip_vle.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index a127d8db76d8..416dde4e8ea1 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -986,11 +986,10 @@ repeat:
 	if (llen > grp->llen)
 		llen = grp->llen;
 
-	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-		clusterpages, pages, llen, work->pageofs,
-		z_erofs_onlinepage_endio);
+	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+					    pages, llen, work->pageofs);
 	if (err != -ENOTSUPP)
-		goto out_percpu;
+		goto out;
 
 	if (sparsemem_pages >= nr_pages)
 		goto skip_allocpage;
@@ -1011,8 +1010,25 @@ skip_allocpage:
 	erofs_vunmap(vout, nr_pages);
 
 out:
+	/* must handle all compressed pages before ending pages */
+	for (i = 0; i < clusterpages; ++i) {
+		page = compressed_pages[i];
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		if (page->mapping == MNGD_MAPPING(sbi))
+			continue;
+#endif
+		/* recycle all individual staging pages */
+		(void)z_erofs_gather_if_stagingpage(page_pool, page);
+
+		WRITE_ONCE(compressed_pages[i], NULL);
+	}
+
 	for (i = 0; i < nr_pages; ++i) {
 		page = pages[i];
+		if (!page)
+			continue;
+
 		DBG_BUGON(!page->mapping);
 
 		/* recycle all individual staging pages */
@@ -1025,20 +1041,6 @@ out:
 		z_erofs_onlinepage_endio(page);
 	}
 
-out_percpu:
-	for (i = 0; i < clusterpages; ++i) {
-		page = compressed_pages[i];
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-		if (page->mapping == MNGD_MAPPING(sbi))
-			continue;
-#endif
-		/* recycle all individual staging pages */
-		(void)z_erofs_gather_if_stagingpage(page_pool, page);
-
-		WRITE_ONCE(compressed_pages[i], NULL);
-	}
-
 	if (pages == z_pagemap_global)
 		mutex_unlock(&z_pagemap_global_lock);
 	else if (unlikely(pages != pages_onstack))
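[Editorial note] The signature change in the first hunk serves the same ordering rule: z_erofs_vle_unzip_fast_percpu() used to receive z_erofs_onlinepage_endio as a callback and end the online pages itself, after which the caller jumped to out_percpu and only then scanned compressed_pages, which is exactly the buggy order described above. With the callback parameter dropped, both the fast and the slow path fall through to out:, where compressed pages are always handled before any page is ended. A hypothetical miniature of that refactor follows; the names are invented for illustration and none of this is erofs code.

#include <stdio.h>

struct buf { int live; };	/* stands in for a page that can be freed */

static void end_buf(struct buf *b)
{
	b->live = 0;		/* plays the role of freeing the page */
}

/*
 * After the patch: the helper no longer takes an endio callback,
 * so it cannot retire buffers behind the caller's back.
 */
static int unzip_fast(struct buf *shared)
{
	return shared->live ? 0 : -1;	/* reading a dead buf would be the UAF */
}

int main(void)
{
	struct buf page = { .live = 1 };

	/* the caller alone sequences the cleanup: use first, end last */
	int err = unzip_fast(&page);	/* runs while the buffer is alive */
	end_buf(&page);			/* ending happens last, in one place */

	printf("fast path %s, buffer ended afterwards\n", err ? "failed" : "ok");
	return 0;
}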