author    Jens Axboe <axboe@kernel.dk>  2018-12-04 09:38:05 -0700
committer Jens Axboe <axboe@kernel.dk>  2018-12-04 09:38:05 -0700
commit    89d04ec3491bea6d69943b5aed62fdebfad49638 (patch)
tree      2388242bc908a4bca33878e109b6f0828069c679 /lib
parent    sbitmap: fix sbitmap_for_each_set() (diff)
parent    Linux 4.20-rc5 (diff)
download  linux-dev-89d04ec3491bea6d69943b5aed62fdebfad49638.tar.xz
          linux-dev-89d04ec3491bea6d69943b5aed62fdebfad49638.zip
Merge tag 'v4.20-rc5' into for-4.21/block
Pull in v4.20-rc5, solving a conflict we'll otherwise get in aio.c and
also getting the merge fix that went into mainline that users are
hitting testing for-4.21/block and/or for-next.

* tag 'v4.20-rc5': (664 commits)
  Linux 4.20-rc5
  PCI: Fix incorrect value returned from pcie_get_speed_cap()
  MAINTAINERS: Update linux-mips mailing list address
  ocfs2: fix potential use after free
  mm/khugepaged: fix the xas_create_range() error path
  mm/khugepaged: collapse_shmem() do not crash on Compound
  mm/khugepaged: collapse_shmem() without freezing new_page
  mm/khugepaged: minor reorderings in collapse_shmem()
  mm/khugepaged: collapse_shmem() remember to clear holes
  mm/khugepaged: fix crashes due to misaccounted holes
  mm/khugepaged: collapse_shmem() stop if punched or truncated
  mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
  mm/huge_memory: splitting set mapping+index before unfreeze
  mm/huge_memory: rename freeze_page() to unmap_page()
  initramfs: clean old path before creating a hardlink
  kernel/kcov.c: mark funcs in __sanitizer_cov_trace_pc() as notrace
  psi: make disabling/enabling easier for vendor kernels
  proc: fixup map_files test on arm
  debugobjects: avoid recursive calls with kmemleak
  userfaultfd: shmem: UFFDIO_COPY: set the page dirty if VM_WRITE is not set
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/debugobjects.c     5
-rw-r--r--  lib/iov_iter.c        38
-rw-r--r--  lib/test_firmware.c    1
-rw-r--r--  lib/test_hexdump.c     2
-rw-r--r--  lib/test_kmod.c        1
-rw-r--r--  lib/test_xarray.c     50
-rw-r--r--  lib/xarray.c         139
7 files changed, 148 insertions(+), 88 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..14afeeb7d6ef 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
if (!new)
return;
- kmemleak_ignore(new);
raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
if (!obj)
goto free;
- kmemleak_ignore(obj);
hlist_add_head(&obj->node, &objects);
}
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
obj_cache = kmem_cache_create("debug_objects_cache",
sizeof (struct debug_obj), 0,
- SLAB_DEBUG_OBJECTS, NULL);
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+ NULL);
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
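The debugobjects hunks above drop the two per-object kmemleak_ignore() calls and instead create the cache with SLAB_NOLEAKTRACE, so kmemleak skips every object allocated from it. A minimal sketch of that pattern, assuming an illustrative "example_cache" and init function rather than the actual debugobjects code:

    static int __init example_init(void)
    {
        struct kmem_cache *cache;
        struct debug_obj *obj;

        /* Objects from a SLAB_NOLEAKTRACE cache are never tracked by
         * kmemleak, so no per-allocation kmemleak_ignore() is needed. */
        cache = kmem_cache_create("example_cache", sizeof(struct debug_obj), 0,
                                  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
        if (!cache)
            return -ENOMEM;

        obj = kmem_cache_zalloc(cache, GFP_KERNEL);
        if (!obj)
            return -ENOMEM;
        return 0;
    }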
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7ebccb5c1637..54c248526b55 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -560,6 +560,38 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
return bytes;
}
+static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, r;
+ size_t off = 0;
+ __wsum sum = *csum, next;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &r);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), r = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
+ char *p = kmap_atomic(pipe->bufs[idx].page);
+ next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
+ sum = csum_block_add(sum, next, off);
+ kunmap_atomic(p);
+ i->idx = idx;
+ i->iov_offset = r + chunk;
+ n -= chunk;
+ off += chunk;
+ addr += chunk;
+ }
+ i->count -= bytes;
+ *csum = sum;
+ return bytes;
+}
+
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
@@ -1438,8 +1470,12 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
const char *from = addr;
__wsum sum, next;
size_t off = 0;
+
+ if (unlikely(iov_iter_is_pipe(i)))
+ return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+
sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+ if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
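The new csum_and_copy_to_pipe_iter() above lets csum_and_copy_to_iter() service pipe iterators (ITER_PIPE, as used by splice) instead of tripping the WARN_ON and returning 0. A hedged caller sketch; data, len and iter are placeholders, not names taken from this patch:

    __wsum csum = 0;
    size_t copied;

    /* Copy len bytes at data into the iterator while folding them into
     * csum; after this change the same call also works when iter is a
     * pipe iterator. */
    copied = csum_and_copy_to_iter(data, len, &csum, iter);
    if (copied != len) {
        /* Short copy: undo the partial advance before giving up. */
        iov_iter_revert(iter, copied);
        return -EFAULT;
    }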
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b984806d7d7b..7cab9a9869ac 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
if (req->fw->size > PAGE_SIZE) {
pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
rc = -EINVAL;
+ goto out;
}
memcpy(buf, req->fw->data, req->fw->size);
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 626f580b4ff7..5144899d3c6b 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
const char *q = *result++;
size_t amount = strlen(q);
- strncpy(p, q, amount);
+ memcpy(p, q, amount);
p += amount;
*p++ = ' ';
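On the strncpy()/memcpy() swap above: the length is already strlen(q) and the destination is deliberately left without a NUL terminator at this point, so memcpy() states the intent directly, whereas strncpy() with the exact source length copies the same bytes but is flagged as a truncation by newer gcc. In other words the hunk boils down to:

    size_t amount = strlen(q);

    memcpy(p, q, amount);   /* copy exactly amount bytes, no NUL written */
    p += amount;
    *p++ = ' ';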
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e3ddd836491f..d82d022111e0 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
dev_info(test_dev->dev, "removing interface\n");
misc_deregister(&test_dev->misc_dev);
- kfree(&test_dev->misc_dev.name);
mutex_unlock(&test_dev->config_mutex);
mutex_unlock(&test_dev->trigger_mutex);
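On the removed kfree() above: kfree() expects the pointer returned by the allocator, but &test_dev->misc_dev.name is the address of the name field embedded in the device structure, not the string it points to, so the call could never have freed the right thing. A general illustration of the difference (kasprintf() here is just an example allocator, not taken from this patch):

    const char *name = kasprintf(GFP_KERNEL, "test_dev-%d", 0);

    kfree(name);    /* correct: the pointer the allocator returned        */
    kfree(&name);   /* wrong: the address of the variable holding it      */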
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index aa47754150ce..0598e86af8fc 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
/* We should see two elements in the array */
+ rcu_read_lock();
xas_for_each(&xas, entry, ULONG_MAX)
seen++;
+ rcu_read_unlock();
XA_BUG_ON(xa, seen != 2);
/* One of which is marked */
xas_set(&xas, 0);
seen = 0;
+ rcu_read_lock();
xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
seen++;
+ rcu_read_unlock();
XA_BUG_ON(xa, seen != 1);
}
XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
@@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa)
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
+ /* And so does xa_insert */
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
/* Can iterate through a reserved entry */
xa_store_index(xa, 5, GFP_KERNEL);
xa_reserve(xa, 6, GFP_KERNEL);
@@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
XA_BUG_ON(xa, xa_load(xa, max) != NULL);
XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+ xas_lock(&xas);
XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+ xas_unlock(&xas);
XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
XA_BUG_ON(xa, xa_load(xa, max) != NULL);
@@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
XA_STATE(xas, xa, index);
xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
+ xas_lock(&xas);
XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
XA_BUG_ON(xa, xas.xa_index != index);
XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ xas_unlock(&xas);
XA_BUG_ON(xa, !xa_empty(xa));
}
#endif
@@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa)
rcu_read_unlock();
/* We can erase multiple values with a single store */
- xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
XA_BUG_ON(xa, !xa_empty(xa));
/* Even when the first slot is empty but the others aren't */
@@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
-static noinline void check_find(struct xarray *xa)
+static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa)
XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
}
XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_find_2(struct xarray *xa)
+{
+ void *entry;
+ unsigned long i, j, index = 0;
+
+ xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ XA_BUG_ON(xa, true);
+ }
+
+ for (i = 0; i < 1024; i++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ j = 0;
+ index = 0;
+ xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ XA_BUG_ON(xa, xa_mk_value(index) != entry);
+ XA_BUG_ON(xa, index != j++);
+ }
+ }
+
+ xa_destroy(xa);
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+ check_find_1(xa);
+ check_find_2(xa);
check_multi_find(xa);
check_multi_find_2(xa);
}
@@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa)
__check_store_range(xa, 4095 + i, 4095 + j);
__check_store_range(xa, 4096 + i, 4096 + j);
__check_store_range(xa, 123456 + i, 123456 + j);
- __check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+ __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
}
}
}
@@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa)
XA_STATE(xas, xa, 1 << order);
xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+ rcu_read_lock();
xas_load(&xas);
XA_BUG_ON(xa, xas.xa_node->count == 0);
XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+ rcu_read_unlock();
xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
GFP_KERNEL);
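The test_xarray.c hunks mainly add the locking the XArray advanced API expects: iteration with xas_for_each()/xas_for_each_marked() and lookups via xas_load() want at least rcu_read_lock(), while modification through xas_store() wants the xa_lock (xas_lock()). A condensed sketch of those two conventions, assuming an illustrative struct xarray *xa that already holds entries:

    XA_STATE(xas, xa, 0);
    void *entry;

    /* Read-side walk: RCU protection is sufficient. */
    rcu_read_lock();
    xas_for_each(&xas, entry, ULONG_MAX)
        pr_info("index %lu\n", xas.xa_index);
    rcu_read_unlock();

    /* Modification through the advanced API: take the xa_lock. */
    xas_set(&xas, 0);
    xas_lock(&xas);
    xas_store(&xas, xa_mk_value(1));
    xas_unlock(&xas);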
diff --git a/lib/xarray.c b/lib/xarray.c
index 8b176f009c08..bbacca576593 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head)
* (see the xa_cmpxchg() implementation for an example).
*
* Return: If the slot already existed, returns the contents of this slot.
- * If the slot was newly created, returns NULL. If it failed to create the
- * slot, returns NULL and indicates the error in @xas.
+ * If the slot was newly created, returns %NULL. If it failed to create the
+ * slot, returns %NULL and indicates the error in @xas.
*/
static void *xas_create(struct xa_state *xas)
{
@@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index)
XA_STATE(xas, xa, index);
return xas_result(&xas, xas_store(&xas, NULL));
}
-EXPORT_SYMBOL_GPL(__xa_erase);
+EXPORT_SYMBOL(__xa_erase);
/**
- * xa_store() - Store this entry in the XArray.
+ * xa_erase() - Erase this entry from the XArray.
* @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
+ * @index: Index of entry.
*
- * After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
- * The marks associated with @index are unaffected unless @entry is %NULL.
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
*
- * Context: Process context. Takes and releases the xa_lock. May sleep
- * if the @gfp flags permit.
- * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
- * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
- * failed.
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
*/
-void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+void *xa_erase(struct xarray *xa, unsigned long index)
{
- XA_STATE(xas, xa, index);
- void *curr;
-
- if (WARN_ON_ONCE(xa_is_internal(entry)))
- return XA_ERROR(-EINVAL);
+ void *entry;
- do {
- xas_lock(&xas);
- curr = xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
- xas_clear_mark(&xas, XA_FREE_MARK);
- xas_unlock(&xas);
- } while (xas_nomem(&xas, gfp));
+ xa_lock(xa);
+ entry = __xa_erase(xa, index);
+ xa_unlock(xa);
- return xas_result(&xas, curr);
+ return entry;
}
-EXPORT_SYMBOL(xa_store);
+EXPORT_SYMBOL(xa_erase);
/**
* __xa_store() - Store this entry in the XArray.
@@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
if (WARN_ON_ONCE(xa_is_internal(entry)))
return XA_ERROR(-EINVAL);
+ if (xa_track_free(xa) && !entry)
+ entry = XA_ZERO_ENTRY;
do {
curr = xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
+ if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
@@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
EXPORT_SYMBOL(__xa_store);
/**
- * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * xa_store() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
- * @old: Old value to test against.
- * @entry: New value to place in array.
+ * @entry: New entry.
* @gfp: Memory allocation flags.
*
- * If the entry at @index is the same as @old, replace it with @entry.
- * If the return value is equal to @old, then the exchange was successful.
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
*
- * Context: Process context. Takes and releases the xa_lock. May sleep
- * if the @gfp flags permit.
- * Return: The old value at this index or xa_err() if an error happened.
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
*/
-void *xa_cmpxchg(struct xarray *xa, unsigned long index,
- void *old, void *entry, gfp_t gfp)
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
- XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
- return XA_ERROR(-EINVAL);
-
- do {
- xas_lock(&xas);
- curr = xas_load(&xas);
- if (curr == XA_ZERO_ENTRY)
- curr = NULL;
- if (curr == old) {
- xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
- xas_clear_mark(&xas, XA_FREE_MARK);
- }
- xas_unlock(&xas);
- } while (xas_nomem(&xas, gfp));
+ xa_lock(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock(xa);
- return xas_result(&xas, curr);
+ return curr;
}
-EXPORT_SYMBOL(xa_cmpxchg);
+EXPORT_SYMBOL(xa_store);
/**
* __xa_cmpxchg() - Store this entry in the XArray.
@@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
if (WARN_ON_ONCE(xa_is_internal(entry)))
return XA_ERROR(-EINVAL);
+ if (xa_track_free(xa) && !entry)
+ entry = XA_ZERO_ENTRY;
do {
curr = xas_load(&xas);
@@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
curr = NULL;
if (curr == old) {
xas_store(&xas, entry);
- if (xa_track_free(xa) && entry)
+ if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
}
} while (__xas_nomem(&xas, gfp));
@@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
EXPORT_SYMBOL(__xa_cmpxchg);
/**
- * xa_reserve() - Reserve this index in the XArray.
+ * __xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
@@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg);
* Ensures there is somewhere to store an entry at @index in the array.
* If there is already something stored at @index, this function does
* nothing. If there was nothing there, the entry is marked as reserved.
- * Loads from @index will continue to see a %NULL pointer until a
- * subsequent store to @index.
+ * Loading from a reserved entry returns a %NULL pointer.
*
* If you do not use the entry that you have reserved, call xa_release()
* or xa_erase() to free any unnecessary memory.
*
- * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
- * if specified in XArray flags. May sleep if the @gfp flags permit.
+ * Context: Any context. Expects the xa_lock to be held on entry. May
+ * release the lock, sleep and reacquire the lock if the @gfp flags permit.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
-int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
XA_STATE(xas, xa, index);
- unsigned int lock_type = xa_lock_type(xa);
void *curr;
do {
- xas_lock_type(&xas, lock_type);
curr = xas_load(&xas);
- if (!curr)
+ if (!curr) {
xas_store(&xas, XA_ZERO_ENTRY);
- xas_unlock_type(&xas, lock_type);
- } while (xas_nomem(&xas, gfp));
+ if (xa_track_free(xa))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ } while (__xas_nomem(&xas, gfp));
return xas_error(&xas);
}
-EXPORT_SYMBOL(xa_reserve);
+EXPORT_SYMBOL(__xa_reserve);
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
do {
xas_lock(&xas);
if (entry) {
- unsigned int order = (last == ~0UL) ? 64 :
- ilog2(last + 1);
+ unsigned int order = BITS_PER_LONG;
+ if (last + 1)
+ order = __ffs(last + 1);
xas_set_order(&xas, last, order);
xas_create(&xas);
if (xas_error(&xas))
@@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc);
* @index: Index of entry.
* @mark: Mark number.
*
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Any context. Expects xa_lock to be held on entry.
*/
@@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
if (entry)
xas_set_mark(&xas, mark);
}
-EXPORT_SYMBOL_GPL(__xa_set_mark);
+EXPORT_SYMBOL(__xa_set_mark);
/**
* __xa_clear_mark() - Clear this mark on this entry while locked.
@@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
if (entry)
xas_clear_mark(&xas, mark);
}
-EXPORT_SYMBOL_GPL(__xa_clear_mark);
+EXPORT_SYMBOL(__xa_clear_mark);
/**
* xa_get_mark() - Inquire whether this mark is set on this entry.
@@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark);
* @index: Index of entry.
* @mark: Mark number.
*
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Process context. Takes and releases the xa_lock.
*/
@@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
+ if (xas.xa_node == XAS_BOUNDS)
+ break;
if (xas.xa_shift) {
if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
continue;
@@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
*
* The @filter may be an XArray mark value, in which case entries which are
* marked with that mark will be copied. It may also be %XA_PRESENT, in
- * which case all entries which are not NULL will be copied.
+ * which case all entries which are not %NULL will be copied.
*
* The entries returned may not represent a snapshot of the XArray at a
* moment in time. For example, if another thread stores to index 5, then
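Taken together, the xarray.c changes sort the API into unlocked wrappers such as xa_store() and xa_erase(), which take and release the xa_lock themselves, and __-prefixed variants (__xa_store(), __xa_erase(), __xa_cmpxchg(), the new __xa_reserve()) that expect the caller to hold the lock on entry. A hedged usage sketch of the two styles, with xa and the indices/values as placeholders:

    /* Simple case: let the XArray manage its own lock. */
    xa_store(xa, 1, xa_mk_value(1), GFP_KERNEL);
    xa_erase(xa, 1);

    /* Caller already holds the xa_lock: use the __ variants (they may
     * drop and retake the lock to allocate if the gfp flags permit). */
    xa_lock(xa);
    __xa_store(xa, 2, xa_mk_value(2), GFP_KERNEL);
    __xa_reserve(xa, 3, GFP_KERNEL);  /* index 3 reads back as NULL until stored */
    xa_unlock(xa);

As an arithmetic aside on the xa_store_range() hunk: with the new calculation, last == 0xffff gives order = __ffs(0x10000) = 16 (one entry covering indices 0-65535), while last == ~0UL makes last + 1 wrap to 0, so the if () is skipped and order stays at BITS_PER_LONG.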