-rw-r--r--   fs/xfs/Kconfig         |    1
-rw-r--r--   fs/xfs/Makefile        |    2
-rw-r--r--   fs/xfs/scrub/trace.c   |    4
-rw-r--r--   fs/xfs/scrub/trace.h   |  260
-rw-r--r--   fs/xfs/scrub/xfarray.c | 1083
-rw-r--r--   fs/xfs/scrub/xfarray.h |  141
-rw-r--r--   fs/xfs/scrub/xfile.c   |  420
-rw-r--r--   fs/xfs/scrub/xfile.h   |   77
8 files changed, 1987 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 52e1823241fb..152348b4dece 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -128,6 +128,7 @@ config XFS_ONLINE_SCRUB
bool "XFS online metadata check support"
default n
depends on XFS_FS
+ depends on TMPFS && SHMEM
select XFS_DRAIN_INTENTS
help
If you say Y here you will be able to check metadata on a
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0a5cebb9802b..f175f823fcd4 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -164,6 +164,8 @@ xfs-y += $(addprefix scrub/, \
rmap.o \
scrub.o \
symlink.o \
+ xfarray.o \
+ xfile.o \
)
xfs-$(CONFIG_XFS_RT) += scrub/rtbitmap.o
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 0a975439d2b6..46249e7b17e0 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -12,8 +12,10 @@
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "scrub/scrub.h"
#include "xfs_ag.h"
+#include "scrub/scrub.h"
+#include "scrub/xfile.h"
+#include "scrub/xfarray.h"
/* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 73cf1002bd94..5100745086c8 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -16,6 +16,10 @@
#include <linux/tracepoint.h>
#include "xfs_bit.h"
+struct xfile;
+struct xfarray;
+struct xfarray_sortinfo;
+
/*
* ftrace's __print_symbolic requires that all enum values be wrapped in the
* TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
@@ -725,6 +729,262 @@ TRACE_EVENT(xchk_refcount_incorrect,
__entry->seen)
)
+TRACE_EVENT(xfile_create,
+ TP_PROTO(struct xfile *xf),
+ TP_ARGS(xf),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, ino)
+ __array(char, pathname, 256)
+ ),
+ TP_fast_assign(
+ char pathname[257];
+ char *path;
+
+ __entry->ino = file_inode(xf->file)->i_ino;
+ memset(pathname, 0, sizeof(pathname));
+ path = file_path(xf->file, pathname, sizeof(pathname) - 1);
+ if (IS_ERR(path))
+ path = "(unknown)";
+ strncpy(__entry->pathname, path, sizeof(__entry->pathname));
+ ),
+ TP_printk("xfino 0x%lx path '%s'",
+ __entry->ino,
+ __entry->pathname)
+);
+
+TRACE_EVENT(xfile_destroy,
+ TP_PROTO(struct xfile *xf),
+ TP_ARGS(xf),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long long, bytes)
+ __field(loff_t, size)
+ ),
+ TP_fast_assign(
+ struct xfile_stat statbuf;
+ int ret;
+
+ ret = xfile_stat(xf, &statbuf);
+ if (!ret) {
+ __entry->bytes = statbuf.bytes;
+ __entry->size = statbuf.size;
+ } else {
+ __entry->bytes = -1;
+ __entry->size = -1;
+ }
+ __entry->ino = file_inode(xf->file)->i_ino;
+ ),
+ TP_printk("xfino 0x%lx mem_bytes 0x%llx isize 0x%llx",
+ __entry->ino,
+ __entry->bytes,
+ __entry->size)
+);
+
+DECLARE_EVENT_CLASS(xfile_class,
+ TP_PROTO(struct xfile *xf, loff_t pos, unsigned long long bytecount),
+ TP_ARGS(xf, pos, bytecount),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long long, bytes_used)
+ __field(loff_t, pos)
+ __field(loff_t, size)
+ __field(unsigned long long, bytecount)
+ ),
+ TP_fast_assign(
+ struct xfile_stat statbuf;
+ int ret;
+
+ ret = xfile_stat(xf, &statbuf);
+ if (!ret) {
+ __entry->bytes_used = statbuf.bytes;
+ __entry->size = statbuf.size;
+ } else {
+ __entry->bytes_used = -1;
+ __entry->size = -1;
+ }
+ __entry->ino = file_inode(xf->file)->i_ino;
+ __entry->pos = pos;
+ __entry->bytecount = bytecount;
+ ),
+ TP_printk("xfino 0x%lx mem_bytes 0x%llx pos 0x%llx bytecount 0x%llx isize 0x%llx",
+ __entry->ino,
+ __entry->bytes_used,
+ __entry->pos,
+ __entry->bytecount,
+ __entry->size)
+);
+#define DEFINE_XFILE_EVENT(name) \
+DEFINE_EVENT(xfile_class, name, \
+ TP_PROTO(struct xfile *xf, loff_t pos, unsigned long long bytecount), \
+ TP_ARGS(xf, pos, bytecount))
+DEFINE_XFILE_EVENT(xfile_pread);
+DEFINE_XFILE_EVENT(xfile_pwrite);
+DEFINE_XFILE_EVENT(xfile_seek_data);
+DEFINE_XFILE_EVENT(xfile_get_page);
+DEFINE_XFILE_EVENT(xfile_put_page);
+
+TRACE_EVENT(xfarray_create,
+ TP_PROTO(struct xfarray *xfa, unsigned long long required_capacity),
+ TP_ARGS(xfa, required_capacity),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(uint64_t, max_nr)
+ __field(size_t, obj_size)
+ __field(int, obj_size_log)
+ __field(unsigned long long, required_capacity)
+ ),
+ TP_fast_assign(
+ __entry->max_nr = xfa->max_nr;
+ __entry->obj_size = xfa->obj_size;
+ __entry->obj_size_log = xfa->obj_size_log;
+ __entry->ino = file_inode(xfa->xfile->file)->i_ino;
+ __entry->required_capacity = required_capacity;
+ ),
+ TP_printk("xfino 0x%lx max_nr %llu reqd_nr %llu objsz %zu objszlog %d",
+ __entry->ino,
+ __entry->max_nr,
+ __entry->required_capacity,
+ __entry->obj_size,
+ __entry->obj_size_log)
+);
+
+TRACE_EVENT(xfarray_isort,
+ TP_PROTO(struct xfarray_sortinfo *si, uint64_t lo, uint64_t hi),
+ TP_ARGS(si, lo, hi),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long long, lo)
+ __field(unsigned long long, hi)
+ ),
+ TP_fast_assign(
+ __entry->ino = file_inode(si->array->xfile->file)->i_ino;
+ __entry->lo = lo;
+ __entry->hi = hi;
+ ),
+ TP_printk("xfino 0x%lx lo %llu hi %llu elts %llu",
+ __entry->ino,
+ __entry->lo,
+ __entry->hi,
+ __entry->hi - __entry->lo)
+);
+
+TRACE_EVENT(xfarray_pagesort,
+ TP_PROTO(struct xfarray_sortinfo *si, uint64_t lo, uint64_t hi),
+ TP_ARGS(si, lo, hi),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long long, lo)
+ __field(unsigned long long, hi)
+ ),
+ TP_fast_assign(
+ __entry->ino = file_inode(si->array->xfile->file)->i_ino;
+ __entry->lo = lo;
+ __entry->hi = hi;
+ ),
+ TP_printk("xfino 0x%lx lo %llu hi %llu elts %llu",
+ __entry->ino,
+ __entry->lo,
+ __entry->hi,
+ __entry->hi - __entry->lo)
+);
+
+TRACE_EVENT(xfarray_qsort,
+ TP_PROTO(struct xfarray_sortinfo *si, uint64_t lo, uint64_t hi),
+ TP_ARGS(si, lo, hi),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long long, lo)
+ __field(unsigned long long, hi)
+ __field(int, stack_depth)
+ __field(int, max_stack_depth)
+ ),
+ TP_fast_assign(
+ __entry->ino = file_inode(si->array->xfile->file)->i_ino;
+ __entry->lo = lo;
+ __entry->hi = hi;
+ __entry->stack_depth = si->stack_depth;
+ __entry->max_stack_depth = si->max_stack_depth;
+ ),
+ TP_printk("xfino 0x%lx lo %llu hi %llu elts %llu stack %d/%d",
+ __entry->ino,
+ __entry->lo,
+ __entry->hi,
+ __entry->hi - __entry->lo,
+ __entry->stack_depth,
+ __entry->max_stack_depth)
+);
+
+TRACE_EVENT(xfarray_sort,
+ TP_PROTO(struct xfarray_sortinfo *si, size_t bytes),
+ TP_ARGS(si, bytes),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long long, nr)
+ __field(size_t, obj_size)
+ __field(size_t, bytes)
+ __field(unsigned int, max_stack_depth)
+ ),
+ TP_fast_assign(
+ __entry->nr = si->array->nr;
+ __entry->obj_size = si->array->obj_size;
+ __entry->ino = file_inode(si->array->xfile->file)->i_ino;
+ __entry->bytes = bytes;
+ __entry->max_stack_depth = si->max_stack_depth;
+ ),
+ TP_printk("xfino 0x%lx nr %llu objsz %zu stack %u bytes %zu",
+ __entry->ino,
+ __entry->nr,
+ __entry->obj_size,
+ __entry->max_stack_depth,
+ __entry->bytes)
+);
+
+TRACE_EVENT(xfarray_sort_stats,
+ TP_PROTO(struct xfarray_sortinfo *si, int error),
+ TP_ARGS(si, error),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+#ifdef DEBUG
+ __field(unsigned long long, loads)
+ __field(unsigned long long, stores)
+ __field(unsigned long long, compares)
+ __field(unsigned long long, heapsorts)
+#endif
+ __field(unsigned int, max_stack_depth)
+ __field(unsigned int, max_stack_used)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __entry->ino = file_inode(si->array->xfile->file)->i_ino;
+#ifdef DEBUG
+ __entry->loads = si->loads;
+ __entry->stores = si->stores;
+ __entry->compares = si->compares;
+ __entry->heapsorts = si->heapsorts;
+#endif
+ __entry->max_stack_depth = si->max_stack_depth;
+ __entry->max_stack_used = si->max_stack_used;
+ __entry->error = error;
+ ),
+ TP_printk(
+#ifdef DEBUG
+ "xfino 0x%lx loads %llu stores %llu compares %llu heapsorts %llu stack_depth %u/%u error %d",
+#else
+ "xfino 0x%lx stack_depth %u/%u error %d",
+#endif
+ __entry->ino,
+#ifdef DEBUG
+ __entry->loads,
+ __entry->stores,
+ __entry->compares,
+ __entry->heapsorts,
+#endif
+ __entry->max_stack_used,
+ __entry->max_stack_depth,
+ __entry->error)
+);
+
/* repair tracepoints */
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
diff --git a/fs/xfs/scrub/xfarray.c b/fs/xfs/scrub/xfarray.c
new file mode 100644
index 000000000000..f0f532c10a5a
--- /dev/null
+++ b/fs/xfs/scrub/xfarray.c
@@ -0,0 +1,1083 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "scrub/xfile.h"
+#include "scrub/xfarray.h"
+#include "scrub/scrub.h"
+#include "scrub/trace.h"
+
+/*
+ * Large Arrays of Fixed-Size Records
+ * ==================================
+ *
+ * This memory array uses an xfile (which itself is a memfd "file") to store
+ * large numbers of fixed-size records in memory that can be paged out. This
+ * puts less stress on the memory reclaim algorithms during an online repair
+ * because we don't have to pin so much memory. However, array access is less
+ * direct than would be in a regular memory array. Access to the array is
+ * performed via indexed load and store methods, and an append method is
+ * provided for convenience. Array elements can be unset, which sets them to
+ * all zeroes. Unset entries are skipped during iteration, though direct loads
+ * will return a zeroed buffer. Callers are responsible for concurrency
+ * control.
+ */
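
[Editorial note: a minimal, hypothetical sketch of how a caller is expected to drive this
interface, not part of the patch. The record type, comparison function, and function names
below are invented for illustration; every xfarray helper referenced is declared in
scrub/xfarray.h later in this patch.]

    struct xrec {
            uint64_t        key;
            uint64_t        val;
    };

    static int xrec_cmp(const void *a, const void *b)
    {
            const struct xrec *ra = a;
            const struct xrec *rb = b;

            if (ra->key < rb->key)
                    return -1;
            if (ra->key > rb->key)
                    return 1;
            return 0;
    }

    static int xrec_example(void)
    {
            struct xfarray  *array;
            struct xrec     rec = { .key = 42, .val = 1 };
            xfarray_idx_t   cur = XFARRAY_CURSOR_INIT;
            int             error;

            /* 0 == no fixed capacity; records must be smaller than a page. */
            error = xfarray_create("example records", 0, sizeof(rec), &array);
            if (error)
                    return error;

            /* Reuse an unset slot if one exists, otherwise append. */
            error = xfarray_store_anywhere(array, &rec);
            if (error)
                    goto out;

            /* Sort; a fatal signal may interrupt the work. */
            error = xfarray_sort(array, xrec_cmp, XFARRAY_SORT_KILLABLE);
            if (error)
                    goto out;

            /* Walk the non-null records; -ENODATA marks the end. */
            while ((error = xfarray_load_next(array, &cur, &rec)) == 0) {
                    /* ...use rec... */
            }
            if (error == -ENODATA)
                    error = 0;
    out:
            xfarray_destroy(array);
            return error;
    }
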
+
+/*
+ * Pointer to scratch space. Because we can't access the xfile data directly,
+ * we allocate a small amount of memory on the end of the xfarray structure to
+ * buffer array items when we need space to store values temporarily.
+ */
+static inline void *xfarray_scratch(struct xfarray *array)
+{
+ return (array + 1);
+}
+
+/* Compute array index given an xfile offset. */
+static xfarray_idx_t
+xfarray_idx(
+ struct xfarray *array,
+ loff_t pos)
+{
+ if (array->obj_size_log >= 0)
+ return (xfarray_idx_t)pos >> array->obj_size_log;
+
+ return div_u64((xfarray_idx_t)pos, array->obj_size);
+}
+
+/* Compute xfile offset of array element. */
+static inline loff_t xfarray_pos(struct xfarray *array, xfarray_idx_t idx)
+{
+ if (array->obj_size_log >= 0)
+ return idx << array->obj_size_log;
+
+ return idx * array->obj_size;
+}
+
+/*
+ * Initialize a big memory array. Array records cannot be larger than a
+ * page, and the array cannot span more bytes than the page cache supports.
+ * If @required_capacity is nonzero, the maximum array size will be set to this
+ * quantity and the array creation will fail if the underlying storage cannot
+ * support that many records.
+ */
+int
+xfarray_create(
+ const char *description,
+ unsigned long long required_capacity,
+ size_t obj_size,
+ struct xfarray **arrayp)
+{
+ struct xfarray *array;
+ struct xfile *xfile;
+ int error;
+
+ ASSERT(obj_size < PAGE_SIZE);
+
+ error = xfile_create(description, 0, &xfile);
+ if (error)
+ return error;
+
+ error = -ENOMEM;
+ array = kzalloc(sizeof(struct xfarray) + obj_size, XCHK_GFP_FLAGS);
+ if (!array)
+ goto out_xfile;
+
+ array->xfile = xfile;
+ array->obj_size = obj_size;
+
+ if (is_power_of_2(obj_size))
+ array->obj_size_log = ilog2(obj_size);
+ else
+ array->obj_size_log = -1;
+
+ array->max_nr = xfarray_idx(array, MAX_LFS_FILESIZE);
+ trace_xfarray_create(array, required_capacity);
+
+ if (required_capacity > 0) {
+ if (array->max_nr < required_capacity) {
+ error = -ENOMEM;
+ goto out_xfarray;
+ }
+ array->max_nr = required_capacity;
+ }
+
+ *arrayp = array;
+ return 0;
+
+out_xfarray:
+ kfree(array);
+out_xfile:
+ xfile_destroy(xfile);
+ return error;
+}
+
+/* Destroy the array. */
+void
+xfarray_destroy(
+ struct xfarray *array)
+{
+ xfile_destroy(array->xfile);
+ kfree(array);
+}
+
+/* Load an element from the array. */
+int
+xfarray_load(
+ struct xfarray *array,
+ xfarray_idx_t idx,
+ void *ptr)
+{
+ if (idx >= array->nr)
+ return -ENODATA;
+
+ return xfile_obj_load(array->xfile, ptr, array->obj_size,
+ xfarray_pos(array, idx));
+}
+
+/* Is this array element potentially unset? */
+static inline bool
+xfarray_is_unset(
+ struct xfarray *array,
+ loff_t pos)
+{
+ void *temp = xfarray_scratch(array);
+ int error;
+
+ if (array->unset_slots == 0)
+ return false;
+
+ error = xfile_obj_load(array->xfile, temp, array->obj_size, pos);
+ if (!error && xfarray_element_is_null(array, temp))
+ return true;
+
+ return false;
+}
+
+/*
+ * Unset an array element. If @idx is the last element in the array, the
+ * array will be truncated. Otherwise, the entry will be zeroed.
+ */
+int
+xfarray_unset(
+ struct xfarray *array,
+ xfarray_idx_t idx)
+{
+ void *temp = xfarray_scratch(array);
+ loff_t pos = xfarray_pos(array, idx);
+ int error;
+
+ if (idx >= array->nr)
+ return -ENODATA;
+
+ if (idx == array->nr - 1) {
+ array->nr--;
+ return 0;
+ }
+
+ if (xfarray_is_unset(array, pos))
+ return 0;
+
+ memset(temp, 0, array->obj_size);
+ error = xfile_obj_store(array->xfile, temp, array->obj_size, pos);
+ if (error)
+ return error;
+
+ array->unset_slots++;
+ return 0;
+}
+
+/*
+ * Store an element in the array. The element must not be completely zeroed,
+ * because those are considered unset sparse elements.
+ */
+int
+xfarray_store(
+ struct xfarray *array,
+ xfarray_idx_t idx,
+ const void *ptr)
+{
+ int ret;
+
+ if (idx >= array->max_nr)
+ return -EFBIG;
+
+ ASSERT(!xfarray_element_is_null(array, ptr));
+
+ ret = xfile_obj_store(array->xfile, ptr, array->obj_size,
+ xfarray_pos(array, idx));
+ if (ret)
+ return ret;
+
+ array->nr = max(array->nr, idx + 1);
+ return 0;
+}
+
+/* Is this array element NULL? */
+bool
+xfarray_element_is_null(
+ struct xfarray *array,
+ const void *ptr)
+{
+ return !memchr_inv(ptr, 0, array->obj_size);
+}
+
+/*
+ * Store an element anywhere in the array that is unset. If there are no
+ * unset slots, append the element to the array.
+ */
+int
+xfarray_store_anywhere(
+ struct xfarray *array,
+ const void *ptr)
+{
+ void *temp = xfarray_scratch(array);
+ loff_t endpos = xfarray_pos(array, array->nr);
+ loff_t pos;
+ int error;
+
+ /* Find an unset slot to put it in. */
+ for (pos = 0;
+ pos < endpos && array->unset_slots > 0;
+ pos += array->obj_size) {
+ error = xfile_obj_load(array->xfile, temp, array->obj_size,
+ pos);
+ if (error || !xfarray_element_is_null(array, temp))
+ continue;
+
+ error = xfile_obj_store(array->xfile, ptr, array->obj_size,
+ pos);
+ if (error)
+ return error;
+
+ array->unset_slots--;
+ return 0;
+ }
+
+ /* No unset slots found; attach it on the end. */
+ array->unset_slots = 0;
+ return xfarray_append(array, ptr);
+}
+
+/* Return length of array. */
+uint64_t
+xfarray_length(
+ struct xfarray *array)
+{
+ return array->nr;
+}
+
+/*
+ * Decide which array item we're going to read as part of an _iter_get.
+ * @cur is the array index, and @pos is the file offset of that array index in
+ * the backing xfile. Returns ENODATA if we reach the end of the records.
+ *
+ * Reading from a hole in a sparse xfile causes page instantiation, so for
+ * iterating a (possibly sparse) array we need to figure out if the cursor is
+ * pointing at a totally uninitialized hole and move the cursor up if
+ * necessary.
+ */
+static inline int
+xfarray_find_data(
+ struct xfarray *array,
+ xfarray_idx_t *cur,
+ loff_t *pos)
+{
+ unsigned int pgoff = offset_in_page(*pos);
+ loff_t end_pos = *pos + array->obj_size - 1;
+ loff_t new_pos;
+
+ /*
+ * If the current array record is not adjacent to a page boundary, we
+ * are in the middle of the page. We do not need to move the cursor.
+ */
+ if (pgoff != 0 && pgoff + array->obj_size - 1 < PAGE_SIZE)
+ return 0;
+
+ /*
+ * Call SEEK_DATA on the last byte in the record we're about to read.
+ * If the record ends at (or crosses) the end of a page then we know
+ * that the first byte of the record is backed by pages and don't need
+ * to query it. If instead the record begins at the start of the page
+ * then we know that querying the last byte is just as good as querying
+ * the first byte, since records cannot be larger than a page.
+ *
+ * If the call returns the same file offset, we know this record is
+ * backed by real pages. We do not need to move the cursor.
+ */
+ new_pos = xfile_seek_data(array->xfile, end_pos);
+ if (new_pos == -ENXIO)
+ return -ENODATA;
+ if (new_pos < 0)
+ return new_pos;
+ if (new_pos == end_pos)
+ return 0;
+
+ /*
+ * Otherwise, SEEK_DATA told us how far up to move the file pointer to
+ * find more data. Move the array index to the first record past the
+ * byte offset we were given.
+ */
+ new_pos = roundup_64(new_pos, array->obj_size);
+ *cur = xfarray_idx(array, new_pos);
+ *pos = xfarray_pos(array, *cur);
+ return 0;
+}
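
[Editorial note: a worked example of that cursor advance, assuming 4096-byte pages and a
hypothetical 24-byte record size. An iterator at index 170 covers bytes 4080 through 4103,
which straddle the first page boundary, so the helper issues SEEK_DATA on byte 4103. If the
next written data starts at byte 8192, then roundup_64(8192, 24) = 8208, so the cursor moves
to index 8208 / 24 = 342 and the loop resumes there, never instantiating pages for the hole
in between.]
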
+
+/*
+ * Starting at *idx, fetch the next non-null array entry and advance the index
+ * to set up the next _load_next call. Returns ENODATA if we reach the end of
+ * the array. Callers must set @*idx to XFARRAY_CURSOR_INIT before the first
+ * call to this function.
+ */
+int
+xfarray_load_next(
+ struct xfarray *array,
+ xfarray_idx_t *idx,
+ void *rec)
+{
+ xfarray_idx_t cur = *idx;
+ loff_t pos = xfarray_pos(array, cur);
+ int error;
+
+ do {
+ if (cur >= array->nr)
+ return -ENODATA;
+
+ /*
+ * Ask the backing store for the location of next possible
+ * written record, then retrieve that record.
+ */
+ error = xfarray_find_data(array, &cur, &pos);
+ if (error)
+ return error;
+ error = xfarray_load(array, cur, rec);
+ if (error)
+ return error;
+
+ cur++;
+ pos += array->obj_size;
+ } while (xfarray_element_is_null(array, rec));
+
+ *idx = cur;
+ return 0;
+}
+
+/* Sorting functions */
+
+#ifdef DEBUG
+# define xfarray_sort_bump_loads(si) do { (si)->loads++; } while (0)
+# define xfarray_sort_bump_stores(si) do { (si)->stores++; } while (0)
+# define xfarray_sort_bump_compares(si) do { (si)->compares++; } while (0)
+# define xfarray_sort_bump_heapsorts(si) do { (si)->heapsorts++; } while (0)
+#else
+# define xfarray_sort_bump_loads(si)
+# define xfarray_sort_bump_stores(si)
+# define xfarray_sort_bump_compares(si)
+# define xfarray_sort_bump_heapsorts(si)
+#endif /* DEBUG */
+
+/* Load an array element for sorting. */
+static inline int
+xfarray_sort_load(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t idx,
+ void *ptr)
+{
+ xfarray_sort_bump_loads(si);
+ return xfarray_load(si->array, idx, ptr);
+}
+
+/* Store an array element for sorting. */
+static inline int
+xfarray_sort_store(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t idx,
+ void *ptr)
+{
+ xfarray_sort_bump_stores(si);
+ return xfarray_store(si->array, idx, ptr);
+}
+
+/* Compare an array element for sorting. */
+static inline int
+xfarray_sort_cmp(
+ struct xfarray_sortinfo *si,
+ const void *a,
+ const void *b)
+{
+ xfarray_sort_bump_compares(si);
+ return si->cmp_fn(a, b);
+}
+
+/* Return a pointer to the low index stack for quicksort partitioning. */
+static inline xfarray_idx_t *xfarray_sortinfo_lo(struct xfarray_sortinfo *si)
+{
+ return (xfarray_idx_t *)(si + 1);
+}
+
+/* Return a pointer to the high index stack for quicksort partitioning. */
+static inline xfarray_idx_t *xfarray_sortinfo_hi(struct xfarray_sortinfo *si)
+{
+ return xfarray_sortinfo_lo(si) + si->max_stack_depth;
+}
+
+/* Size of each element in the quicksort pivot array. */
+static inline size_t
+xfarray_pivot_rec_sz(
+ struct xfarray *array)
+{
+ return round_up(array->obj_size, 8) + sizeof(xfarray_idx_t);
+}
+
+/* Allocate memory to handle the sort. */
+static inline int
+xfarray_sortinfo_alloc(
+ struct xfarray *array,
+ xfarray_cmp_fn cmp_fn,
+ unsigned int flags,
+ struct xfarray_sortinfo **infop)
+{
+ struct xfarray_sortinfo *si;
+ size_t nr_bytes = sizeof(struct xfarray_sortinfo);
+ size_t pivot_rec_sz = xfarray_pivot_rec_sz(array);
+ int max_stack_depth;
+
+ /*
+ * The median-of-nine pivot algorithm doesn't work if a subset has
+ * fewer than 9 items. Make sure the in-memory sort will always take
+ * over for subsets where this wouldn't be the case.
+ */
+ BUILD_BUG_ON(XFARRAY_QSORT_PIVOT_NR >= XFARRAY_ISORT_NR);
+
+ /*
+ * Tail-call recursion during the partitioning phase means that
+ * quicksort will never recurse more than log2(nr) times. We need one
+ * extra level of stack to hold the initial parameters. In-memory
+ * sort will always take care of the last few levels of recursion for
+ * us, so we can reduce the stack depth by that much.
+ */
+ max_stack_depth = ilog2(array->nr) + 1 - (XFARRAY_ISORT_SHIFT - 1);
+ if (max_stack_depth < 1)
+ max_stack_depth = 1;
+
+ /* Each level of quicksort uses a lo and a hi index */
+ nr_bytes += max_stack_depth * sizeof(xfarray_idx_t) * 2;
+
+ /* Scratchpad for in-memory sort, or finding the pivot */
+ nr_bytes += max_t(size_t,
+ (XFARRAY_QSORT_PIVOT_NR + 1) * pivot_rec_sz,
+ XFARRAY_ISORT_NR * array->obj_size);
+
+ si = kvzalloc(nr_bytes, XCHK_GFP_FLAGS);
+ if (!si)
+ return -ENOMEM;
+
+ si->array = array;
+ si->cmp_fn = cmp_fn;
+ si->flags = flags;
+ si->max_stack_depth = max_stack_depth;
+ si->max_stack_used = 1;
+
+ xfarray_sortinfo_lo(si)[0] = 0;
+ xfarray_sortinfo_hi(si)[0] = array->nr - 1;
+
+ trace_xfarray_sort(si, nr_bytes);
+ *infop = si;
+ return 0;
+}
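
[Editorial note: to put hypothetical numbers on this allocation, consider an array of 2^20
records of 16 bytes each. max_stack_depth = ilog2(2^20) + 1 - (XFARRAY_ISORT_SHIFT - 1)
= 20 + 1 - 3 = 18, so the lo/hi partition stacks need 18 * 2 * sizeof(xfarray_idx_t) = 288
bytes. The scratchpad term is max((9 + 1) * (round_up(16, 8) + 8), 16 * 16) = max(240, 256)
= 256 bytes, for roughly 544 bytes on top of the sortinfo header.]
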
+
+/* Should this sort be terminated by a fatal signal? */
+static inline bool
+xfarray_sort_terminated(
+ struct xfarray_sortinfo *si,
+ int *error)
+{
+ /*
+ * If preemption is disabled, we need to yield to the scheduler every
+ * few seconds so that we don't run afoul of the soft lockup watchdog
+ * or RCU stall detector.
+ */
+ cond_resched();
+
+ if ((si->flags & XFARRAY_SORT_KILLABLE) &&
+ fatal_signal_pending(current)) {
+ if (*error == 0)
+ *error = -EINTR;
+ return true;
+ }
+ return false;
+}
+
+/* Do we want an in-memory sort? */
+static inline bool
+xfarray_want_isort(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t start,
+ xfarray_idx_t end)
+{
+ /*
+ * For array subsets that fit in the scratchpad, it's much faster to
+ * use the kernel's heapsort than quicksort's stack machine.
+ */
+ return (end - start) < XFARRAY_ISORT_NR;
+}
+
+/* Return the scratch space within the sortinfo structure. */
+static inline void *xfarray_sortinfo_isort_scratch(struct xfarray_sortinfo *si)
+{
+ return xfarray_sortinfo_hi(si) + si->max_stack_depth;
+}
+
+/*
+ * Sort a small number of array records using scratchpad memory. The records
+ * need not be contiguous in the xfile's memory pages.
+ */
+STATIC int
+xfarray_isort(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t lo,
+ xfarray_idx_t hi)
+{
+ void *scratch = xfarray_sortinfo_isort_scratch(si);
+ loff_t lo_pos = xfarray_pos(si->array, lo);
+ loff_t len = xfarray_pos(si->array, hi - lo + 1);
+ int error;
+
+ trace_xfarray_isort(si, lo, hi);
+
+ xfarray_sort_bump_loads(si);
+ error = xfile_obj_load(si->array->xfile, scratch, len, lo_pos);
+ if (error)
+ return error;
+
+ xfarray_sort_bump_heapsorts(si);
+ sort(scratch, hi - lo + 1, si->array->obj_size, si->cmp_fn, NULL);
+
+ xfarray_sort_bump_stores(si);
+ return xfile_obj_store(si->array->xfile, scratch, len, lo_pos);
+}
+
+/* Grab a page for sorting records. */
+static inline int
+xfarray_sort_get_page(
+ struct xfarray_sortinfo *si,
+ loff_t pos,
+ uint64_t len)
+{
+ int error;
+
+ error = xfile_get_page(si->array->xfile, pos, len, &si->xfpage);
+ if (error)
+ return error;
+
+ /*
+ * xfile pages must never be mapped into userspace, so we skip the
+ * dcache flush when mapping the page.
+ */
+ si->page_kaddr = kmap_local_page(si->xfpage.page);
+ return 0;
+}
+
+/* Release a page we grabbed for sorting records. */
+static inline int
+xfarray_sort_put_page(
+ struct xfarray_sortinfo *si)
+{
+ if (!si->page_kaddr)
+ return 0;
+
+ kunmap_local(si->page_kaddr);
+ si->page_kaddr = NULL;
+
+ return xfile_put_page(si->array->xfile, &si->xfpage);
+}
+
+/* Decide if these records are eligible for in-page sorting. */
+static inline bool
+xfarray_want_pagesort(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t lo,
+ xfarray_idx_t hi)
+{
+ pgoff_t lo_page;
+ pgoff_t hi_page;
+ loff_t end_pos;
+
+ /* We can only map one page at a time. */
+ lo_page = xfarray_pos(si->array, lo) >> PAGE_SHIFT;
+ end_pos = xfarray_pos(si->array, hi) + si->array->obj_size - 1;
+ hi_page = end_pos >> PAGE_SHIFT;
+
+ return lo_page == hi_page;
+}
+
+/* Sort a bunch of records that all live in the same memory page. */
+STATIC int
+xfarray_pagesort(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t lo,
+ xfarray_idx_t hi)
+{
+ void *startp;
+ loff_t lo_pos = xfarray_pos(si->array, lo);
+ uint64_t len = xfarray_pos(si->array, hi - lo);
+ int error = 0;
+
+ trace_xfarray_pagesort(si, lo, hi);
+
+ xfarray_sort_bump_loads(si);
+ error = xfarray_sort_get_page(si, lo_pos, len);
+ if (error)
+ return error;
+
+ xfarray_sort_bump_heapsorts(si);
+ startp = si->page_kaddr + offset_in_page(lo_pos);
+ sort(startp, hi - lo + 1, si->array->obj_size, si->cmp_fn, NULL);
+
+ xfarray_sort_bump_stores(si);
+ return xfarray_sort_put_page(si);
+}
+
+/* Return a pointer to the xfarray pivot record within the sortinfo struct. */
+static inline void *xfarray_sortinfo_pivot(struct xfarray_sortinfo *si)
+{
+ return xfarray_sortinfo_hi(si) + si->max_stack_depth;
+}
+
+/* Return a pointer to the start of the pivot array. */
+static inline void *
+xfarray_sortinfo_pivot_array(
+ struct xfarray_sortinfo *si)
+{
+ return xfarray_sortinfo_pivot(si) + si->array->obj_size;
+}
+
+/* The xfarray record is stored at the start of each pivot array element. */
+static inline void *
+xfarray_pivot_array_rec(
+ void *pa,
+ size_t pa_recsz,
+ unsigned int pa_idx)
+{
+ return pa + (pa_recsz * pa_idx);
+}
+
+/* The xfarray index is stored at the end of each pivot array element. */
+static inline xfarray_idx_t *
+xfarray_pivot_array_idx(
+ void *pa,
+ size_t pa_recsz,
+ unsigned int pa_idx)
+{
+ return xfarray_pivot_array_rec(pa, pa_recsz, pa_idx + 1) -
+ sizeof(xfarray_idx_t);
+}
+
+/*
+ * Find a pivot value for quicksort partitioning, swap it with a[lo], and save
+ * the cached pivot record for the next step.
+ *
+ * Load evenly-spaced records within the given range into memory, sort them,
+ * and choose the pivot from the median record. Using multiple points will
+ * improve the quality of the pivot selection, and hopefully avoid the worst
+ * quicksort behavior, since our array values are nearly always evenly sorted.
+ */
+STATIC int
+xfarray_qsort_pivot(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t lo,
+ xfarray_idx_t hi)
+{
+ void *pivot = xfarray_sortinfo_pivot(si);
+ void *parray = xfarray_sortinfo_pivot_array(si);
+ void *recp;
+ xfarray_idx_t *idxp;
+ xfarray_idx_t step = (hi - lo) / (XFARRAY_QSORT_PIVOT_NR - 1);
+ size_t pivot_rec_sz = xfarray_pivot_rec_sz(si->array);
+ int i, j;
+ int error;
+
+ ASSERT(step > 0);
+
+ /*
+ * Load the xfarray indexes of the records we intend to sample into the
+ * pivot array.
+ */
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, 0);
+ *idxp = lo;
+ for (i = 1; i < XFARRAY_QSORT_PIVOT_NR - 1; i++) {
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, i);
+ *idxp = lo + (i * step);
+ }
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz,
+ XFARRAY_QSORT_PIVOT_NR - 1);
+ *idxp = hi;
+
+ /* Load the selected xfarray records into the pivot array. */
+ for (i = 0; i < XFARRAY_QSORT_PIVOT_NR; i++) {
+ xfarray_idx_t idx;
+
+ recp = xfarray_pivot_array_rec(parray, pivot_rec_sz, i);
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, i);
+
+ /* No unset records; load directly into the array. */
+ if (likely(si->array->unset_slots == 0)) {
+ error = xfarray_sort_load(si, *idxp, recp);
+ if (error)
+ return error;
+ continue;
+ }
+
+ /*
+ * Load non-null records into the scratchpad without changing
+ * the xfarray_idx_t in the pivot array.
+ */
+ idx = *idxp;
+ xfarray_sort_bump_loads(si);
+ error = xfarray_load_next(si->array, &idx, recp);
+ if (error)
+ return error;
+ }
+
+ xfarray_sort_bump_heapsorts(si);
+ sort(parray, XFARRAY_QSORT_PIVOT_NR, pivot_rec_sz, si->cmp_fn, NULL);
+
+ /*
+ * We sorted the pivot array records (which includes the xfarray
+ * indices) in xfarray record order. The median element of the pivot
+ * array contains the xfarray record that we will use as the pivot.
+ * Copy that xfarray record to the designated space.
+ */
+ recp = xfarray_pivot_array_rec(parray, pivot_rec_sz,
+ XFARRAY_QSORT_PIVOT_NR / 2);
+ memcpy(pivot, recp, si->array->obj_size);
+
+ /* If the pivot record we chose was already in a[lo] then we're done. */
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz,
+ XFARRAY_QSORT_PIVOT_NR / 2);
+ if (*idxp == lo)
+ return 0;
+
+ /*
+ * Find the cached copy of a[lo] in the pivot array so that we can swap
+ * a[lo] and a[pivot].
+ */
+ for (i = 0, j = -1; i < XFARRAY_QSORT_PIVOT_NR; i++) {
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, i);
+ if (*idxp == lo)
+ j = i;
+ }
+ if (j < 0) {
+ ASSERT(j >= 0);
+ return -EFSCORRUPTED;
+ }
+
+ /* Swap a[lo] and a[pivot]. */
+ error = xfarray_sort_store(si, lo, pivot);
+ if (error)
+ return error;
+
+ recp = xfarray_pivot_array_rec(parray, pivot_rec_sz, j);
+ idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz,
+ XFARRAY_QSORT_PIVOT_NR / 2);
+ return xfarray_sort_store(si, *idxp, recp);
+}
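
[Editorial note: a hypothetical worked example of the sampling. For a partition with lo = 0
and hi = 8000, step = 8000 / 8 = 1000, so the nine sampled indices are 0, 1000, 2000, ...,
7000, 8000. After the in-memory sort of those cached samples, the record in slot 4 (the
median) becomes the pivot, and the array index stored alongside it is where the old a[lo]
record is written to complete the swap.]
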
+
+/*
+ * Set up the pointers for the next iteration. We push onto the stack all of
+ * the unsorted values between a[lo + 1] and a[end[i]], and we tweak the
+ * current stack frame to point to the unsorted values between a[beg[i]] and
+ * a[lo] so that those values will be sorted when we pop the stack.
+ */
+static inline int
+xfarray_qsort_push(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t *si_lo,
+ xfarray_idx_t *si_hi,
+ xfarray_idx_t lo,
+ xfarray_idx_t hi)
+{
+ /* Check for stack overflows */
+ if (si->stack_depth >= si->max_stack_depth - 1) {
+ ASSERT(si->stack_depth < si->max_stack_depth - 1);
+ return -EFSCORRUPTED;
+ }
+
+ si->max_stack_used = max_t(uint8_t, si->max_stack_used,
+ si->stack_depth + 2);
+
+ si_lo[si->stack_depth + 1] = lo + 1;
+ si_hi[si->stack_depth + 1] = si_hi[si->stack_depth];
+ si_hi[si->stack_depth++] = lo - 1;
+
+ /*
+ * Always start with the smaller of the two partitions to keep the
+ * amount of recursion in check.
+ */
+ if (si_hi[si->stack_depth] - si_lo[si->stack_depth] >
+ si_hi[si->stack_depth - 1] - si_lo[si->stack_depth - 1]) {
+ swap(si_lo[si->stack_depth], si_lo[si->stack_depth - 1]);
+ swap(si_hi[si->stack_depth], si_hi[si->stack_depth - 1]);
+ }
+
+ return 0;
+}
+
+/*
+ * Load an element from the array into the first scratchpad and cache the page,
+ * if possible.
+ */
+static inline int
+xfarray_sort_load_cached(
+ struct xfarray_sortinfo *si,
+ xfarray_idx_t idx,
+ void *ptr)
+{
+ loff_t idx_pos = xfarray_pos(si->array, idx);
+ pgoff_t startpage;
+ pgoff_t endpage;
+ int error = 0;
+
+ /*
+ * If this load would split a page, release the cached page, if any,
+ * and perform a traditional read.
+ */
+ startpage = idx_pos >> PAGE_SHIFT;
+ endpage = (idx_pos + si->array->obj_size - 1) >> PAGE_SHIFT;
+ if (startpage != endpage) {
+ error = xfarray_sort_put_page(si);
+ if (error)
+ return error;
+
+ if (xfarray_sort_terminated(si, &error))
+ return error;
+
+ return xfile_obj_load(si->array->xfile, ptr,
+ si->array->obj_size, idx_pos);
+ }
+
+ /* If the cached page is not the one we want, release it. */
+ if (xfile_page_cached(&si->xfpage) &&
+ xfile_page_index(&si->xfpage) != startpage) {
+ error = xfarray_sort_put_page(si);
+ if (error)
+ return error;
+ }
+
+ /*
+ * If we don't have a cached page (and we know the load is contained
+ * in a single page) then grab it.
+ */
+ if (!xfile_page_cached(&si->xfpage)) {
+ if (xfarray_sort_terminated(si, &error))
+ return error;
+
+ error = xfarray_sort_get_page(si, startpage << PAGE_SHIFT,
+ PAGE_SIZE);
+ if (error)
+ return error;
+ }
+
+ memcpy(ptr, si->page_kaddr + offset_in_page(idx_pos),
+ si->array->obj_size);
+ return 0;
+}
+
+/*
+ * Sort the array elements via quicksort. This implementation incorporates
+ * four optimizations discussed in Sedgewick:
+ *
+ * 1. Use an explicit stack of array indices to store the next array partition
+ * to sort. This helps us to avoid recursion in the call stack, which is
+ * particularly expensive in the kernel.
+ *
+ * 2. For arrays with records in arbitrary or user-controlled order, choose the
+ * pivot element using a median-of-nine decision tree. This reduces the
+ * probability of selecting a bad pivot value which causes worst case
+ * behavior (i.e. partition sizes of 1).
+ *
+ * 3. The smaller of the two sub-partitions is pushed onto the stack to start
+ * the next level of recursion, and the larger sub-partition replaces the
+ * current stack frame. This guarantees that we won't need more than
+ * log2(nr) stack space.
+ *
+ * 4. For small sets, load the records into the scratchpad and run heapsort on
+ * them because that is very fast. In the author's experience, this yields
+ * a ~10% reduction in runtime.
+ *
+ * If a small set is contained entirely within a single xfile memory page,
+ * map the page directly and run heap sort directly on the xfile page
+ * instead of using the load/store interface. This halves the runtime.
+ *
+ * 5. This optimization is specific to the implementation. When converging lo
+ * and hi after selecting a pivot, we will try to retain the xfile memory
+ * page between load calls, which reduces run time by 50%.
+ */
+
+/*
+ * Due to the use of signed indices, we can only support up to 2^63 records.
+ * Files can only grow to 2^63 bytes, so this is not much of a limitation.
+ */
+#define QSORT_MAX_RECS (1ULL << 63)
+
+int
+xfarray_sort(
+ struct xfarray *array,
+ xfarray_cmp_fn cmp_fn,
+ unsigned int flags)
+{
+ struct xfarray_sortinfo *si;
+ xfarray_idx_t *si_lo, *si_hi;
+ void *pivot;
+ void *scratch = xfarray_scratch(array);
+ xfarray_idx_t lo, hi;
+ int error = 0;
+
+ if (array->nr < 2)
+ return 0;
+ if (array->nr >= QSORT_MAX_RECS)
+ return -E2BIG;
+
+ error = xfarray_sortinfo_alloc(array, cmp_fn, flags, &si);
+ if (error)
+ return error;
+ si_lo = xfarray_sortinfo_lo(si);
+ si_hi = xfarray_sortinfo_hi(si);
+ pivot = xfarray_sortinfo_pivot(si);
+
+ while (si->stack_depth >= 0) {
+ lo = si_lo[si->stack_depth];
+ hi = si_hi[si->stack_depth];
+
+ trace_xfarray_qsort(si, lo, hi);
+
+ /* Nothing left in this partition to sort; pop stack. */
+ if (lo >= hi) {
+ si->stack_depth--;
+ continue;
+ }
+
+ /*
+ * If directly mapping the page and sorting can solve our
+ * problems, we're done.
+ */
+ if (xfarray_want_pagesort(si, lo, hi)) {
+ error = xfarray_pagesort(si, lo, hi);
+ if (error)
+ goto out_free;
+ si->stack_depth--;
+ continue;
+ }
+
+ /* If an in-memory sort can solve our problems, we're done. */
+ if (xfarray_want_isort(si, lo, hi)) {
+ error = xfarray_isort(si, lo, hi);
+ if (error)
+ goto out_free;
+ si->stack_depth--;
+ continue;
+ }
+
+ /* Pick a pivot, move it to a[lo] and stash it. */
+ error = xfarray_qsort_pivot(si, lo, hi);
+ if (error)
+ goto out_free;
+
+ /*
+ * Rearrange a[lo..hi] such that everything smaller than the
+ * pivot is on the left side of the range and everything larger
+ * than the pivot is on the right side of the range.
+ */
+ while (lo < hi) {
+ /*
+ * Decrement hi until it finds an a[hi] less than the
+ * pivot value.
+ */
+ error = xfarray_sort_load_cached(si, hi, scratch);
+ if (error)
+ goto out_free;
+ while (xfarray_sort_cmp(si, scratch, pivot) >= 0 &&
+ lo < hi) {
+ hi--;
+ error = xfarray_sort_load_cached(si, hi,
+ scratch);
+ if (error)
+ goto out_free;
+ }
+ error = xfarray_sort_put_page(si);
+ if (error)
+ goto out_free;
+
+ if (xfarray_sort_terminated(si, &error))
+ goto out_free;
+
+ /* Copy that item (a[hi]) to a[lo]. */
+ if (lo < hi) {
+ error = xfarray_sort_store(si, lo++, scratch);
+ if (error)
+ goto out_free;
+ }
+
+ /*
+ * Increment lo until it finds an a[lo] greater than
+ * the pivot value.
+ */
+ error = xfarray_sort_load_cached(si, lo, scratch);
+ if (error)
+ goto out_free;
+ while (xfarray_sort_cmp(si, scratch, pivot) <= 0 &&
+ lo < hi) {
+ lo++;
+ error = xfarray_sort_load_cached(si, lo,
+ scratch);
+ if (error)
+ goto out_free;
+ }
+ error = xfarray_sort_put_page(si);
+ if (error)
+ goto out_free;
+
+ if (xfarray_sort_terminated(si, &error))
+ goto out_free;
+
+ /* Copy that item (a[lo]) to a[hi]. */
+ if (lo < hi) {
+ error = xfarray_sort_store(si, hi--, scratch);
+ if (error)
+ goto out_free;
+ }
+
+ if (xfarray_sort_terminated(si, &error))
+ goto out_free;
+ }
+
+ /*
+ * Put our pivot value in the correct place at a[lo]. All
+ * values between a[beg[i]] and a[lo - 1] should be less than
+ * the pivot; and all values between a[lo + 1] and a[end[i]-1]
+ * should be greater than the pivot.
+ */
+ error = xfarray_sort_store(si, lo, pivot);
+ if (error)
+ goto out_free;
+
+ /* Set up the stack frame to process the two partitions. */
+ error = xfarray_qsort_push(si, si_lo, si_hi, lo, hi);
+ if (error)
+ goto out_free;
+
+ if (xfarray_sort_terminated(si, &error))
+ goto out_free;
+ }
+
+out_free:
+ trace_xfarray_sort_stats(si, error);
+ kvfree(si);
+ return error;
+}
diff --git a/fs/xfs/scrub/xfarray.h b/fs/xfs/scrub/xfarray.h
new file mode 100644
index 000000000000..4ecac01363d9
--- /dev/null
+++ b/fs/xfs/scrub/xfarray.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_SCRUB_XFARRAY_H__
+#define __XFS_SCRUB_XFARRAY_H__
+
+/* xfile array index type, along with cursor initialization */
+typedef uint64_t xfarray_idx_t;
+#define XFARRAY_CURSOR_INIT ((__force xfarray_idx_t)0)
+
+/* Iterate each index of an xfile array. */
+#define foreach_xfarray_idx(array, idx) \
+ for ((idx) = XFARRAY_CURSOR_INIT; \
+ (idx) < xfarray_length(array); \
+ (idx)++)
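
[Editorial note: a short, hypothetical usage sketch of this iterator; the xrec type is the
invented example record from earlier. Unlike xfarray_load_next(), this walks every index, so
unset slots come back as all-zero buffers and the caller filters them explicitly.]

    static int xrec_visit_all(struct xfarray *array)
    {
            struct xrec     rec;    /* hypothetical record type */
            xfarray_idx_t   idx;
            int             error;

            foreach_xfarray_idx(array, idx) {
                    error = xfarray_load(array, idx, &rec);
                    if (error)
                            return error;
                    if (xfarray_element_is_null(array, &rec))
                            continue;       /* unset slots load as all zeroes */
                    /* ...process rec... */
            }
            return 0;
    }
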
+
+struct xfarray {
+ /* Underlying file that backs the array. */
+ struct xfile *xfile;
+
+ /* Number of array elements. */
+ xfarray_idx_t nr;
+
+ /* Maximum possible array size. */
+ xfarray_idx_t max_nr;
+
+ /* Number of unset slots in the array below @nr. */
+ uint64_t unset_slots;
+
+ /* Size of an array element. */
+ size_t obj_size;
+
+ /* log2 of array element size, if possible. */
+ int obj_size_log;
+};
+
+int xfarray_create(const char *descr, unsigned long long required_capacity,
+ size_t obj_size, struct xfarray **arrayp);
+void xfarray_destroy(struct xfarray *array);
+int xfarray_load(struct xfarray *array, xfarray_idx_t idx, void *ptr);
+int xfarray_unset(struct xfarray *array, xfarray_idx_t idx);
+int xfarray_store(struct xfarray *array, xfarray_idx_t idx, const void *ptr);
+int xfarray_store_anywhere(struct xfarray *array, const void *ptr);
+bool xfarray_element_is_null(struct xfarray *array, const void *ptr);
+
+/* Append an element to the array. */
+static inline int xfarray_append(struct xfarray *array, const void *ptr)
+{
+ return xfarray_store(array, array->nr, ptr);
+}
+
+uint64_t xfarray_length(struct xfarray *array);
+int xfarray_load_next(struct xfarray *array, xfarray_idx_t *idx, void *rec);
+
+/* Declarations for xfile array sort functionality. */
+
+typedef cmp_func_t xfarray_cmp_fn;
+
+/* Perform an in-memory heapsort for small subsets. */
+#define XFARRAY_ISORT_SHIFT (4)
+#define XFARRAY_ISORT_NR (1U << XFARRAY_ISORT_SHIFT)
+
+/* Evaluate this many points to find the qsort pivot. */
+#define XFARRAY_QSORT_PIVOT_NR (9)
+
+struct xfarray_sortinfo {
+ struct xfarray *array;
+
+ /* Comparison function for the sort. */
+ xfarray_cmp_fn cmp_fn;
+
+ /* Maximum height of the partition stack. */
+ uint8_t max_stack_depth;
+
+ /* Current height of the partition stack. */
+ int8_t stack_depth;
+
+ /* Maximum stack depth ever used. */
+ uint8_t max_stack_used;
+
+ /* XFARRAY_SORT_* flags; see below. */
+ unsigned int flags;
+
+ /* Cache a page here for faster access. */
+ struct xfile_page xfpage;
+ void *page_kaddr;
+
+#ifdef DEBUG
+ /* Performance statistics. */
+ uint64_t loads;
+ uint64_t stores;
+ uint64_t compares;
+ uint64_t heapsorts;
+#endif
+ /*
+ * Extra bytes are allocated beyond the end of the structure to store
+ * quicksort information. C does not permit multiple VLAs per struct,
+ * so we document all of this in a comment.
+ *
+ * Pretend that we have a typedef for array records:
+ *
+ * typedef char[array->obj_size] xfarray_rec_t;
+ *
+ * First comes the quicksort partition stack:
+ *
+ * xfarray_idx_t lo[max_stack_depth];
+ * xfarray_idx_t hi[max_stack_depth];
+ *
+ * union {
+ *
+ * If for a given subset we decide to use an in-memory sort, we use a
+ * block of scratchpad records here to compare items:
+ *
+ * xfarray_rec_t scratch[ISORT_NR];
+ *
+ * Otherwise, we want to partition the array for quicksort.
+ * We store the chosen pivot record at the start of the scratchpad area
+ * and use the rest to sample some records to estimate the median.
+ * The format of the qsort_pivot array enables us to use the kernel
+ * heapsort function to place the median value in the middle.
+ *
+ * struct {
+ * xfarray_rec_t pivot;
+ * struct {
+ * xfarray_rec_t rec; (rounded up to 8 bytes)
+ * xfarray_idx_t idx;
+ * } qsort_pivot[QSORT_PIVOT_NR];
+ * };
+ * }
+ */
+};
+
+/* Sort can be interrupted by a fatal signal. */
+#define XFARRAY_SORT_KILLABLE (1U << 0)
+
+int xfarray_sort(struct xfarray *array, xfarray_cmp_fn cmp_fn,
+ unsigned int flags);
+
+#endif /* __XFS_SCRUB_XFARRAY_H__ */
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
new file mode 100644
index 000000000000..d98e8e77c684
--- /dev/null
+++ b/fs/xfs/scrub/xfile.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_format.h"
+#include "scrub/xfile.h"
+#include "scrub/xfarray.h"
+#include "scrub/scrub.h"
+#include "scrub/trace.h"
+#include <linux/shmem_fs.h>
+
+/*
+ * Swappable Temporary Memory
+ * ==========================
+ *
+ * Online checking sometimes needs to be able to stage a large amount of data
+ * in memory. This information might not fit in the available memory and it
+ * doesn't all need to be accessible at all times. In other words, we want an
+ * indexed data buffer to store data that can be paged out.
+ *
+ * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
+ * requirements. Therefore, the xfile mechanism uses an unlinked shmem file to
+ * store our staging data. This file is not installed in the file descriptor
+ * table so that user programs cannot access the data, which means that the
+ * xfile must be freed with xfile_destroy.
+ *
+ * xfiles assume that the caller will handle all required concurrency
+ * management; standard vfs locks (freezer and inode) are not taken. Reads
+ * and writes are satisfied directly from the page cache.
+ *
+ * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
+ * of a hole cause a page to be mapped into the file. If you are going to
+ * create a sparse xfile, please be careful about reading from uninitialized
+ * parts of the file. These pages are !Uptodate and will eventually be
+ * reclaimed if not written, but in the short term this boosts memory
+ * consumption.
+ */
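
[Editorial note: a minimal, hypothetical sketch of the intended usage pattern, not part of
the patch. It stores and reloads one value at an arbitrary byte offset using the helpers
declared in scrub/xfile.h; the function name and offset are invented for illustration.]

    static int xfile_example(void)
    {
            struct xfile    *xf;
            uint64_t        value = 0xabcd;
            loff_t          pos = 37 * sizeof(uint64_t);    /* arbitrary offset */
            int             error;

            error = xfile_create("example staging", 0, &xf);
            if (error)
                    return error;

            /* The obj helpers turn any error or short I/O into -ENOMEM. */
            error = xfile_obj_store(xf, &value, sizeof(value), pos);
            if (!error)
                    error = xfile_obj_load(xf, &value, sizeof(value), pos);

            xfile_destroy(xf);
            return error;
    }
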
+
+/*
+ * xfiles must not be exposed to userspace and require upper layers to
+ * coordinate access to the one handle returned by the constructor, so
+ * establish a separate lock class for xfiles to avoid confusing lockdep.
+ */
+static struct lock_class_key xfile_i_mutex_key;
+
+/*
+ * Create an xfile of the given size. The description will be used in the
+ * trace output.
+ */
+int
+xfile_create(
+ const char *description,
+ loff_t isize,
+ struct xfile **xfilep)
+{
+ struct inode *inode;
+ struct xfile *xf;
+ int error = -ENOMEM;
+
+ xf = kmalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
+ if (!xf)
+ return -ENOMEM;
+
+ xf->file = shmem_file_setup(description, isize, 0);
+ if (!xf->file)
+ goto out_xfile;
+ if (IS_ERR(xf->file)) {
+ error = PTR_ERR(xf->file);
+ goto out_xfile;
+ }
+
+ /*
+ * We want a large sparse file that we can pread, pwrite, and seek.
+ * xfile users are responsible for keeping the xfile hidden away from
+ * all other callers, so we skip timestamp updates and security checks.
+ * Make the inode only accessible by root, just in case the xfile ever
+ * escapes.
+ */
+ xf->file->f_mode |= FMODE_PREAD | FMODE_PWRITE | FMODE_NOCMTIME |
+ FMODE_LSEEK;
+ xf->file->f_flags |= O_RDWR | O_LARGEFILE | O_NOATIME;
+ inode = file_inode(xf->file);
+ inode->i_flags |= S_PRIVATE | S_NOCMTIME | S_NOATIME;
+ inode->i_mode &= ~0177;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+
+ lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);
+
+ trace_xfile_create(xf);
+
+ *xfilep = xf;
+ return 0;
+out_xfile:
+ kfree(xf);
+ return error;
+}
+
+/* Close the file and release all resources. */
+void
+xfile_destroy(
+ struct xfile *xf)
+{
+ struct inode *inode = file_inode(xf->file);
+
+ trace_xfile_destroy(xf);
+
+ lockdep_set_class(&inode->i_rwsem, &inode->i_sb->s_type->i_mutex_key);
+ fput(xf->file);
+ kfree(xf);
+}
+
+/*
+ * Read a memory object directly from the xfile's page cache. Unlike regular
+ * pread, we return -E2BIG and -EFBIG for reads that are too large or at too
+ * high an offset, instead of truncating the read. Otherwise, we return
+ * bytes read or an error code, like regular pread.
+ */
+ssize_t
+xfile_pread(
+ struct xfile *xf,
+ void *buf,
+ size_t count,
+ loff_t pos)
+{
+ struct inode *inode = file_inode(xf->file);
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page = NULL;
+ ssize_t read = 0;
+ unsigned int pflags;
+ int error = 0;
+
+ if (count > MAX_RW_COUNT)
+ return -E2BIG;
+ if (inode->i_sb->s_maxbytes - pos < count)
+ return -EFBIG;
+
+ trace_xfile_pread(xf, pos, count);
+
+ pflags = memalloc_nofs_save();
+ while (count > 0) {
+ void *p, *kaddr;
+ unsigned int len;
+
+ len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
+
+ /*
+ * In-kernel reads of a shmem file cause it to allocate a page
+ * if the mapping shows a hole. Therefore, if we hit ENOMEM
+ * we can continue by zeroing the caller's buffer.
+ */
+ page = shmem_read_mapping_page_gfp(mapping, pos >> PAGE_SHIFT,
+ __GFP_NOWARN);
+ if (IS_ERR(page)) {
+ error = PTR_ERR(page);
+ if (error != -ENOMEM)
+ break;
+
+ memset(buf, 0, len);
+ goto advance;
+ }
+
+ if (PageUptodate(page)) {
+ /*
+ * xfile pages must never be mapped into userspace, so
+ * we skip the dcache flush.
+ */
+ kaddr = kmap_local_page(page);
+ p = kaddr + offset_in_page(pos);
+ memcpy(buf, p, len);
+ kunmap_local(kaddr);
+ } else {
+ memset(buf, 0, len);
+ }
+ put_page(page);
+
+advance:
+ count -= len;
+ pos += len;
+ buf += len;
+ read += len;
+ }
+ memalloc_nofs_restore(pflags);
+
+ if (read > 0)
+ return read;
+ return error;
+}
+
+/*
+ * Write a memory object directly to the xfile's page cache. Unlike regular
+ * pwrite, we return -E2BIG and -EFBIG for writes that are too large or at too
+ * high an offset, instead of truncating the write. Otherwise, we return
+ * bytes written or an error code, like regular pwrite.
+ */
+ssize_t
+xfile_pwrite(
+ struct xfile *xf,
+ const void *buf,
+ size_t count,
+ loff_t pos)
+{
+ struct inode *inode = file_inode(xf->file);
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+ struct page *page = NULL;
+ ssize_t written = 0;
+ unsigned int pflags;
+ int error = 0;
+
+ if (count > MAX_RW_COUNT)
+ return -E2BIG;
+ if (inode->i_sb->s_maxbytes - pos < count)
+ return -EFBIG;
+
+ trace_xfile_pwrite(xf, pos, count);
+
+ pflags = memalloc_nofs_save();
+ while (count > 0) {
+ void *fsdata = NULL;
+ void *p, *kaddr;
+ unsigned int len;
+ int ret;
+
+ len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
+
+ /*
+ * We call write_begin directly here to avoid all the freezer
+ * protection lock-taking that happens in the normal path.
+ * shmem doesn't support fs freeze, but lockdep doesn't know
+ * that and will trip over that.
+ */
+ error = aops->write_begin(NULL, mapping, pos, len, &page,
+ &fsdata);
+ if (error)
+ break;
+
+ /*
+ * xfile pages must never be mapped into userspace, so we skip
+ * the dcache flush. If the page is not uptodate, zero it
+ * before writing data.
+ */
+ kaddr = kmap_local_page(page);
+ if (!PageUptodate(page)) {
+ memset(kaddr, 0, PAGE_SIZE);
+ SetPageUptodate(page);
+ }
+ p = kaddr + offset_in_page(pos);
+ memcpy(p, buf, len);
+ kunmap_local(kaddr);
+
+ ret = aops->write_end(NULL, mapping, pos, len, len, page,
+ fsdata);
+ if (ret < 0) {
+ error = ret;
+ break;
+ }
+
+ written += ret;
+ if (ret != len)
+ break;
+
+ count -= ret;
+ pos += ret;
+ buf += ret;
+ }
+ memalloc_nofs_restore(pflags);
+
+ if (written > 0)
+ return written;
+ return error;
+}
+
+/* Find the next written area in the xfile data for a given offset. */
+loff_t
+xfile_seek_data(
+ struct xfile *xf,
+ loff_t pos)
+{
+ loff_t ret;
+
+ ret = vfs_llseek(xf->file, pos, SEEK_DATA);
+ trace_xfile_seek_data(xf, pos, ret);
+ return ret;
+}
+
+/* Query stat information for an xfile. */
+int
+xfile_stat(
+ struct xfile *xf,
+ struct xfile_stat *statbuf)
+{
+ struct kstat ks;
+ int error;
+
+ error = vfs_getattr_nosec(&xf->file->f_path, &ks,
+ STATX_SIZE | STATX_BLOCKS, AT_STATX_DONT_SYNC);
+ if (error)
+ return error;
+
+ statbuf->size = ks.size;
+ statbuf->bytes = ks.blocks << SECTOR_SHIFT;
+ return 0;
+}
+
+/*
+ * Grab the (locked) page for a memory object. The object cannot span a page
+ * boundary. Returns 0 (and a locked page) if successful, -ENOTBLK if we
+ * cannot grab the page, or the usual negative errno.
+ */
+int
+xfile_get_page(
+ struct xfile *xf,
+ loff_t pos,
+ unsigned int len,
+ struct xfile_page *xfpage)
+{
+ struct inode *inode = file_inode(xf->file);
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+ struct page *page = NULL;
+ void *fsdata = NULL;
+ loff_t key = round_down(pos, PAGE_SIZE);
+ unsigned int pflags;
+ int error;
+
+ if (inode->i_sb->s_maxbytes - pos < len)
+ return -ENOMEM;
+ if (len > PAGE_SIZE - offset_in_page(pos))
+ return -ENOTBLK;
+
+ trace_xfile_get_page(xf, pos, len);
+
+ pflags = memalloc_nofs_save();
+
+ /*
+ * We call write_begin directly here to avoid all the freezer
+ * protection lock-taking that happens in the normal path. shmem
+ * doesn't support fs freeze, but lockdep doesn't know that and will
+ * trip over that.
+ */
+ error = aops->write_begin(NULL, mapping, key, PAGE_SIZE, &page,
+ &fsdata);
+ if (error)
+ goto out_pflags;
+
+ /* We got the page, so make sure we push out EOF. */
+ if (i_size_read(inode) < pos + len)
+ i_size_write(inode, pos + len);
+
+ /*
+ * If the page isn't up to date, fill it with zeroes before we hand it
+ * to the caller and make sure the backing store will hold on to them.
+ */
+ if (!PageUptodate(page)) {
+ void *kaddr;
+
+ kaddr = kmap_local_page(page);
+ memset(kaddr, 0, PAGE_SIZE);
+ kunmap_local(kaddr);
+ SetPageUptodate(page);
+ }
+
+ /*
+ * Mark each page dirty so that the contents are written to some
+ * backing store when we drop this buffer, and take an extra reference
+ * to prevent the xfile page from being swapped or removed from the
+ * page cache by reclaim if the caller unlocks the page.
+ */
+ set_page_dirty(page);
+ get_page(page);
+
+ xfpage->page = page;
+ xfpage->fsdata = fsdata;
+ xfpage->pos = key;
+out_pflags:
+ memalloc_nofs_restore(pflags);
+ return error;
+}
+
+/*
+ * Release the (locked) page for a memory object. Returns 0 or a negative
+ * errno.
+ */
+int
+xfile_put_page(
+ struct xfile *xf,
+ struct xfile_page *xfpage)
+{
+ struct inode *inode = file_inode(xf->file);
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+ unsigned int pflags;
+ int ret;
+
+ trace_xfile_put_page(xf, xfpage->pos, PAGE_SIZE);
+
+ /* Give back the reference that we took in xfile_get_page. */
+ put_page(xfpage->page);
+
+ pflags = memalloc_nofs_save();
+ ret = aops->write_end(NULL, mapping, xfpage->pos, PAGE_SIZE, PAGE_SIZE,
+ xfpage->page, xfpage->fsdata);
+ memalloc_nofs_restore(pflags);
+ memset(xfpage, 0, sizeof(struct xfile_page));
+
+ if (ret < 0)
+ return ret;
+ if (ret != PAGE_SIZE)
+ return -EIO;
+ return 0;
+}
diff --git a/fs/xfs/scrub/xfile.h b/fs/xfs/scrub/xfile.h
new file mode 100644
index 000000000000..d56643b0f429
--- /dev/null
+++ b/fs/xfs/scrub/xfile.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_SCRUB_XFILE_H__
+#define __XFS_SCRUB_XFILE_H__
+
+struct xfile_page {
+ struct page *page;
+ void *fsdata;
+ loff_t pos;
+};
+
+static inline bool xfile_page_cached(const struct xfile_page *xfpage)
+{
+ return xfpage->page != NULL;
+}
+
+static inline pgoff_t xfile_page_index(const struct xfile_page *xfpage)
+{
+ return xfpage->page->index;
+}
+
+struct xfile {
+ struct file *file;
+};
+
+int xfile_create(const char *description, loff_t isize, struct xfile **xfilep);
+void xfile_destroy(struct xfile *xf);
+
+ssize_t xfile_pread(struct xfile *xf, void *buf, size_t count, loff_t pos);
+ssize_t xfile_pwrite(struct xfile *xf, const void *buf, size_t count,
+ loff_t pos);
+
+/*
+ * Load an object. Since we're treating this file as "memory", any error or
+ * short IO is treated as a failure to allocate memory.
+ */
+static inline int
+xfile_obj_load(struct xfile *xf, void *buf, size_t count, loff_t pos)
+{
+ ssize_t ret = xfile_pread(xf, buf, count, pos);
+
+ if (ret < 0 || ret != count)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * Store an object. Since we're treating this file as "memory", any error or
+ * short IO is treated as a failure to allocate memory.
+ */
+static inline int
+xfile_obj_store(struct xfile *xf, const void *buf, size_t count, loff_t pos)
+{
+ ssize_t ret = xfile_pwrite(xf, buf, count, pos);
+
+ if (ret < 0 || ret != count)
+ return -ENOMEM;
+ return 0;
+}
+
+loff_t xfile_seek_data(struct xfile *xf, loff_t pos);
+
+struct xfile_stat {
+ loff_t size;
+ unsigned long long bytes;
+};
+
+int xfile_stat(struct xfile *xf, struct xfile_stat *statbuf);
+
+int xfile_get_page(struct xfile *xf, loff_t offset, unsigned int len,
+ struct xfile_page *xbuf);
+int xfile_put_page(struct xfile *xf, struct xfile_page *xbuf);
+
+#endif /* __XFS_SCRUB_XFILE_H__ */