author	Christoph Hellwig <hch@lst.de>	2024-02-19 07:27:23 +0100
committer	Chandan Babu R <chandanbabu@kernel.org>	2024-02-21 11:36:53 +0530
commit	e62e26acc9ab85e996eff660318109470eae2607 (patch)
tree	860b18b64f284fa5fe4b4d192bcbc2e50ddb27fc
parent	xfs: don't try to handle non-update pages in xfile_obj_load (diff)
xfs: don't allow highmem pages in xfile mappings
XFS is generally used on 64-bit, non-highmem platforms and xfile
mappings are accessed all the time.  Reduce our pain by not allowing
any highmem mappings in the xfile page cache and remove all the kmap
calls for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
-rw-r--r--	fs/xfs/scrub/xfarray.c	3
-rw-r--r--	fs/xfs/scrub/xfile.c	21
2 files changed, 10 insertions, 14 deletions
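
For context, here is a minimal sketch of the pattern this patch adopts. The xfile_demo_* names are illustrative only and do not exist in the kernel tree; the actual changes are in the diff below. The idea is that once the backing mapping's allocation mask is restricted to GFP_KERNEL, shmem can never hand back highmem pages for it, so every page cache page has a permanent kernel virtual address and page_address() can replace the kmap_local_page()/kunmap_local() pairs.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Restrict the mapping so its page cache pages are never allocated from highmem. */
static void xfile_demo_init_mapping(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
}

/* Copy data out of a page cache page without a temporary kmap. */
static void xfile_demo_copy_out(struct page *page, loff_t pos,
		void *buf, unsigned int len)
{
	/*
	 * Safe only because the mapping above excludes __GFP_HIGHMEM;
	 * otherwise kmap_local_page()/kunmap_local() would still be needed.
	 */
	memcpy(buf, page_address(page) + offset_in_page(pos), len);
}

The trade-off is that this relies on xfiles only being used on configurations where all page cache memory is directly mapped, which the commit message notes is the common case for XFS (64-bit, non-highmem platforms).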
diff --git a/fs/xfs/scrub/xfarray.c b/fs/xfs/scrub/xfarray.c
index 95ac14bceead..d0f98a43b2ba 100644
--- a/fs/xfs/scrub/xfarray.c
+++ b/fs/xfs/scrub/xfarray.c
@@ -580,7 +580,7 @@ xfarray_sort_get_page(
* xfile pages must never be mapped into userspace, so we skip the
* dcache flush when mapping the page.
*/
- si->page_kaddr = kmap_local_page(si->xfpage.page);
+ si->page_kaddr = page_address(si->xfpage.page);
return 0;
}
@@ -592,7 +592,6 @@ xfarray_sort_put_page(
if (!si->page_kaddr)
return 0;
- kunmap_local(si->page_kaddr);
si->page_kaddr = NULL;
return xfile_put_page(si->array->xfile, &si->xfpage);
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index 4ec975977dcd..009a760cb690 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -77,6 +77,12 @@ xfile_create(
inode = file_inode(xf->file);
lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);
+ /*
+ * We don't want to bother with kmapping data during repair, so don't
+ * allow highmem pages to back this mapping.
+ */
+ mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
+
trace_xfile_create(xf);
*xfilep = xf;
@@ -126,7 +132,6 @@ xfile_load(
pflags = memalloc_nofs_save();
while (count > 0) {
- void *p, *kaddr;
unsigned int len;
len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
@@ -153,10 +158,7 @@ xfile_load(
* xfile pages must never be mapped into userspace, so
* we skip the dcache flush.
*/
- kaddr = kmap_local_page(page);
- p = kaddr + offset_in_page(pos);
- memcpy(buf, p, len);
- kunmap_local(kaddr);
+ memcpy(buf, page_address(page) + offset_in_page(pos), len);
put_page(page);
advance:
@@ -221,14 +223,13 @@ xfile_store(
* the dcache flush. If the page is not uptodate, zero it
* before writing data.
*/
- kaddr = kmap_local_page(page);
+ kaddr = page_address(page);
if (!PageUptodate(page)) {
memset(kaddr, 0, PAGE_SIZE);
SetPageUptodate(page);
}
p = kaddr + offset_in_page(pos);
memcpy(p, buf, len);
- kunmap_local(kaddr);
ret = aops->write_end(NULL, mapping, pos, len, len, page,
fsdata);
@@ -314,11 +315,7 @@ xfile_get_page(
* to the caller and make sure the backing store will hold on to them.
*/
if (!PageUptodate(page)) {
- void *kaddr;
-
- kaddr = kmap_local_page(page);
- memset(kaddr, 0, PAGE_SIZE);
- kunmap_local(kaddr);
+ memset(page_address(page), 0, PAGE_SIZE);
SetPageUptodate(page);
}