Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug17
-rw-r--r--lib/Makefile4
-rw-r--r--lib/bitmap.c28
-rw-r--r--lib/bug.c28
-rw-r--r--lib/cmdline.c56
-rw-r--r--lib/dma-debug.c8
-rw-r--r--lib/iov_iter.c69
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/md5.c95
-rw-r--r--lib/nlattr.c28
-rw-r--r--lib/percpu-refcount.c17
-rw-r--r--lib/radix-tree.c2
-rw-r--r--lib/refcount.c169
-rw-r--r--lib/rhashtable.c33
-rw-r--r--lib/sbitmap.c75
-rw-r--r--lib/string.c2
-rw-r--r--lib/test_bpf.c150
-rw-r--r--lib/test_user_copy.c1
-rw-r--r--lib/usercopy.c26
-rw-r--r--lib/vsprintf.c6
20 files changed, 595 insertions, 224 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2da6c2..e2a617e09ab7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -130,7 +130,8 @@ config DYNAMIC_DEBUG
nullarbor:~ # echo -n 'func svc_process -p' >
<debugfs>/dynamic_debug/control
- See Documentation/dynamic-debug-howto.txt for additional information.
+ See Documentation/admin-guide/dynamic-debug-howto.rst for additional
+ information.
endmenu # "printk and dmesg options"
@@ -356,7 +357,7 @@ config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \
(CRIS || M68K || FRV || UML || \
- AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
+ SUPERH || BLACKFIN || MN10300 || METAG) || \
ARCH_WANT_FRAME_POINTERS
default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
help
@@ -404,8 +405,8 @@ config MAGIC_SYSRQ
by pressing various keys while holding SysRq (Alt+PrintScreen). It
also works on a serial console (on PC hardware at least), if you
send a BREAK and then within 5 seconds a command keypress. The
- keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
- unless you really know what this hack does.
+ keys are documented in <file:Documentation/admin-guide/sysrq.rst>.
+ Don't say Y unless you really know what this hack does.
config MAGIC_SYSRQ_DEFAULT_ENABLE
hex "Enable magic SysRq key functions by default"
@@ -414,7 +415,7 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
help
Specifies which SysRq key functions are enabled by default.
This may be set to 1 or 0 to enable or disable them all, or
- to a bitmask as described in Documentation/sysrq.txt.
+ to a bitmask as described in Documentation/admin-guide/sysrq.rst.
config MAGIC_SYSRQ_SERIAL
bool "Enable magic SysRq key over serial"
@@ -1103,9 +1104,6 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.txt.
-config PROVE_LOCKING_SMALL
- bool
-
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1112,9 @@ config LOCKDEP
select KALLSYMS
select KALLSYMS_ALL
+config LOCKDEP_SMALL
+ bool
+
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 320ac46a8725..a155c73e3437 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o \
- sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
+ sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o \
@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o
+ once.o refcount.o usercopy.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0b66f0e5eb6b..08c6ef3a2b6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -502,11 +502,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
* Syntax: range:used_size/group_size
* Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
*
- * Returns 0 on success, -errno on invalid input strings.
- * Error values:
- * %-EINVAL: second number in range smaller than first
- * %-EINVAL: invalid character in string
- * %-ERANGE: bit number specified too large for mask
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ * - ``-EINVAL``: second number in range smaller than first
+ * - ``-EINVAL``: invalid character in string
+ * - ``-ERANGE``: bit number specified too large for mask
*/
static int __bitmap_parselist(const char *buf, unsigned int buflen,
int is_user, unsigned long *maskp,
@@ -864,14 +864,16 @@ EXPORT_SYMBOL(bitmap_bitremap);
* 11 was set in @orig had no affect on @dst.
*
* Example [2] for bitmap_fold() + bitmap_onto():
- * Let's say @relmap has these ten bits set:
+ * Let's say @relmap has these ten bits set::
+ *
* 40 41 42 43 45 48 53 61 74 95
+ *
* (for the curious, that's 40 plus the first ten terms of the
* Fibonacci sequence.)
*
* Further lets say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possibility of an empty @dst result:
+ * avoid the possibility of an empty @dst result::
*
* unsigned long *tmp; // a temporary bitmap's bits
*
@@ -882,22 +884,26 @@ EXPORT_SYMBOL(bitmap_bitremap);
* various @orig's. I list the zero-based positions of each set bit.
* The tmp column shows the intermediate result, as computed by
* using bitmap_fold() to fold the @orig bitmap modulo ten
- * (the weight of @relmap).
+ * (the weight of @relmap):
*
+ * =============== ============== =================
* @orig tmp @dst
* 0 0 40
* 1 1 41
* 9 9 95
- * 10 0 40 (*)
+ * 10 0 40 [#f1]_
* 1 3 5 7 1 3 5 7 41 43 48 61
* 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
* 0 9 18 27 0 9 8 7 40 61 74 95
* 0 10 20 30 0 40
* 0 11 22 33 0 1 2 3 40 41 42 43
* 0 12 24 36 0 2 4 6 40 42 45 53
- * 78 102 211 1 2 8 41 42 74 (*)
+ * 78 102 211 1 2 8 41 42 74 [#f1]_
+ * =============== ============== =================
+ *
+ * .. [#f1]
*
- * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ * For these marked lines, if we hadn't first done bitmap_fold()
* into tmp, then the @dst result would have been empty.
*
* If either of @orig or @relmap is empty (no set bits), then @dst
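As a sanity check on the table above, the fold-then-onto pipeline is easy to model in ordinary userspace C. A minimal sketch, representing bitmaps as arrays of set-bit positions rather than using the kernel API:

    #include <stdio.h>

    /* @relmap's ten set bits, in increasing order (weight = 10) */
    static const int relmap[] = {40, 41, 42, 43, 45, 48, 53, 61, 74, 95};

    int main(void)
    {
        const int orig[] = {0, 9, 18, 27};   /* one row of the table */
        int tmp[10] = {0};
        int i;

        /* bitmap_fold(): OR each @orig bit into position (bit % weight) */
        for (i = 0; i < 4; i++)
            tmp[orig[i] % 10] = 1;

        /* bitmap_onto(): the n-th set bit of tmp selects the n-th set
         * bit of @relmap */
        printf("dst:");
        for (i = 0; i < 10; i++)
            if (tmp[i])
                printf(" %d", relmap[i]);
        printf("\n");   /* prints "dst: 40 61 74 95", matching the table */
        return 0;
    }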
diff --git a/lib/bug.c b/lib/bug.c
index 06edbbef0623..a6a1137d06db 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,7 +47,7 @@
#include <linux/sched.h>
#include <linux/rculist.h>
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct module *mod;
- const struct bug_entry *bug = NULL;
+ struct bug_entry *bug = NULL;
rcu_read_lock_sched();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
#else
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
return NULL;
}
#endif
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
{
- const struct bug_entry *bug;
+ struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
- const struct bug_entry *bug;
+ struct bug_entry *bug;
const char *file;
- unsigned line, warning;
+ unsigned line, warning, once, done;
if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
+ }
}
if (warning) {
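The new BUGFLAG_ONCE/BUGFLAG_DONE pair is a plain latch: the first warning from a WARN_*_ONCE() site sets BUGFLAG_DONE, and every later hit returns early. A runnable userspace model of the same idea (only the flag names come from the patch; the rest is illustrative):

    #include <stdio.h>

    #define BUGFLAG_ONCE 0x1
    #define BUGFLAG_DONE 0x2

    struct entry { unsigned int flags; };

    static void report(struct entry *e, const char *msg)
    {
        if ((e->flags & BUGFLAG_ONCE) && (e->flags & BUGFLAG_DONE))
            return;                /* already reported once */
        e->flags |= BUGFLAG_DONE;  /* the only store, as in report_bug() */
        printf("WARNING: %s\n", msg);
    }

    int main(void)
    {
        struct entry e = { .flags = BUGFLAG_ONCE };

        report(&e, "first hit");   /* prints */
        report(&e, "second hit");  /* suppressed */
        return 0;
    }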
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf73c2ec..3c6432df7e63 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/ctype.h>
/*
* If a hyphen was found in get_option, this will handle the
@@ -189,3 +190,58 @@ bool parse_option_str(const char *str, const char *option)
return false;
}
+
+/*
+ * Parse a string to get a param value pair.
+ * You can use " around spaces, but can't escape ".
+ * Hyphens and underscores are equivalent in parameter names.
+ */
+char *next_arg(char *args, char **param, char **val)
+{
+ unsigned int i, equals = 0;
+ int in_quote = 0, quoted = 0;
+ char *next;
+
+ if (*args == '"') {
+ args++;
+ in_quote = 1;
+ quoted = 1;
+ }
+
+ for (i = 0; args[i]; i++) {
+ if (isspace(args[i]) && !in_quote)
+ break;
+ if (equals == 0) {
+ if (args[i] == '=')
+ equals = i;
+ }
+ if (args[i] == '"')
+ in_quote = !in_quote;
+ }
+
+ *param = args;
+ if (!equals)
+ *val = NULL;
+ else {
+ args[equals] = '\0';
+ *val = args + equals + 1;
+
+ /* Don't include quotes in value. */
+ if (**val == '"') {
+ (*val)++;
+ if (args[i-1] == '"')
+ args[i-1] = '\0';
+ }
+ }
+ if (quoted && args[i-1] == '"')
+ args[i-1] = '\0';
+
+ if (args[i]) {
+ args[i] = '\0';
+ next = args + i + 1;
+ } else
+ next = args + i;
+
+ /* Chew up trailing spaces. */
+ return skip_spaces(next);
+}
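Because next_arg() only needs isspace() and skip_spaces(), it can be exercised outside the kernel: paste the function into a userspace file together with <ctype.h> and a one-line skip_spaces() that advances past whitespace, then drive it with a harness like the sketch below (the command line is made up):

    #include <stdio.h>

    int main(void)
    {
        char buf[] = "root=/dev/sda1 console=\"ttyS0,115200\" quiet";
        char *rest = buf, *param, *val;

        while (*rest) {
            rest = next_arg(rest, &param, &val);
            printf("param=%s val=%s\n", param, val ? val : "(none)");
        }
        /* param=root val=/dev/sda1
         * param=console val=ttyS0,115200
         * param=quiet val=(none) */
        return 0;
    }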
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index b157b46cc9a6..fe4d50c992df 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -942,21 +942,17 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o
unsigned long flags;
int count = 0, i;
- local_irq_save(flags);
-
for (i = 0; i < HASH_SIZE; ++i) {
- spin_lock(&dma_entry_hash[i].lock);
+ spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
if (entry->dev == dev) {
count += 1;
*out_entry = entry;
}
}
- spin_unlock(&dma_entry_hash[i].lock);
+ spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
}
- local_irq_restore(flags);
-
return count;
}
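Replacing the single local_irq_save() around the whole scan with spin_lock_irqsave() per bucket keeps interrupts disabled only while one hash bucket is walked, instead of across all HASH_SIZE buckets.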
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..4952311422c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
size_t count)
{
/* It will get better. Eventually... */
- if (segment_eq(get_fs(), KERNEL_DS)) {
+ if (uaccess_kernel()) {
direction |= ITER_KVEC;
i->type = direction;
i->kvec = (struct kvec *)iov;
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return 0;
}
iterate_and_advance(i, bytes, v,
- __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+ __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+ if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
}
EXPORT_SYMBOL(iov_iter_advance);
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ i->count += unroll;
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+ while (1) {
+ size_t n = off - pipe->bufs[idx].offset;
+ if (unroll < n) {
+ off -= unroll;
+ break;
+ }
+ unroll -= n;
+ if (!unroll && idx == i->start_idx) {
+ off = 0;
+ break;
+ }
+ if (!idx--)
+ idx = pipe->buffers - 1;
+ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ }
+ i->iov_offset = off;
+ i->idx = idx;
+ pipe_truncate(i);
+ return;
+ }
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (i->type & ITER_BVEC) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ } else { /* same logics for iovec and kvec */
+ const struct iovec *iov = i->iov;
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
/*
* Return the count of just the current iov_iter segment.
*/
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
i->count = count;
+ i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
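iov_iter_revert() lets a caller rewind exactly the bytes it consumed when an operation must be aborted or replayed. A hypothetical kernel-context caller, as a sketch (buf, len and iter are illustrative):

    size_t copied = copy_from_iter(buf, len, iter);
    if (copied != len) {
        /* undo the partial advance so the iterator can be replayed */
        iov_iter_revert(iter, copied);
        return -EFAULT;
    }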
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcaeb0f56..763d70a18941 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -601,12 +601,15 @@ struct kobject *kobject_get(struct kobject *kobj)
}
EXPORT_SYMBOL(kobject_get);
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
{
+ if (!kobj)
+ return NULL;
if (!kref_get_unless_zero(&kobj->kref))
kobj = NULL;
return kobj;
}
+EXPORT_SYMBOL(kobject_get_unless_zero);
/*
* kobject_cleanup - free kobject resources.
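With kobject_get_unless_zero() exported and NULL-tolerant, module code can try to take a reference while racing against the final kobject_put(). A hypothetical caller, sketched:

    struct kobject *kobj = kobject_get_unless_zero(candidate);
    if (!kobj)
        return NULL;   /* refcount already hit zero; object is going away */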
diff --git a/lib/md5.c b/lib/md5.c
deleted file mode 100644
index bb0cd01d356d..000000000000
--- a/lib/md5.c
+++ /dev/null
@@ -1,95 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/cryptohash.h>
-
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, in, s) \
- (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-
-void md5_transform(__u32 *hash, __u32 const *in)
-{
- u32 a, b, c, d;
-
- a = hash[0];
- b = hash[1];
- c = hash[2];
- d = hash[3];
-
- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
- hash[0] += a;
- hash[1] += b;
- hash[2] += c;
- hash[3] += d;
-}
-EXPORT_SYMBOL(md5_transform);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index b42b8577fc23..a7e0b16078df 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -112,6 +112,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
@@ -120,20 +121,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
* Returns 0 on success or a negative error code.
*/
int nla_validate(const struct nlattr *head, int len, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
- int rem, err;
+ int rem;
nla_for_each_attr(nla, head, len, rem) {
- err = validate_nla(nla, maxtype, policy);
- if (err < 0)
- goto errout;
+ int err = validate_nla(nla, maxtype, policy);
+
+ if (err < 0) {
+ if (extack)
+ extack->bad_attr = nla;
+ return err;
+ }
}
- err = 0;
-errout:
- return err;
+ return 0;
}
EXPORT_SYMBOL(nla_validate);
@@ -180,7 +184,8 @@ EXPORT_SYMBOL(nla_policy_len);
* Returns 0 on success or a negative error code.
*/
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy)
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
int rem, err;
@@ -193,8 +198,11 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
if (type > 0 && type <= maxtype) {
if (policy) {
err = validate_nla(nla, maxtype, policy);
- if (err < 0)
+ if (err < 0) {
+ if (extack)
+ extack->bad_attr = nla;
goto errout;
+ }
}
tb[type] = (struct nlattr *)nla;
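A netlink handler now threads its extended-ACK struct through parsing so userspace learns which attribute failed validation. A hypothetical call site, as a sketch (the MY_* names and my_policy are made up):

    struct nlattr *tb[MY_ATTR_MAX + 1];
    int err = nla_parse(tb, MY_ATTR_MAX, nlmsg_attrdata(nlh, 0),
                        nlmsg_attrlen(nlh, 0), my_policy, extack);
    if (err < 0)
        return err;   /* extack->bad_attr now points at the offender */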
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9ac959ef4cae..fe03c6d52761 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -260,6 +260,22 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
+
+/**
+ * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ *
+ * Schedule switching the ref to atomic mode, and wait for the
+ * switch to complete. Caller must ensure that no other thread
+ * will switch back to percpu mode.
+ */
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
+{
+ percpu_ref_switch_to_atomic(ref, NULL);
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
/**
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
@@ -290,6 +306,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
/**
* percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
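The new _sync variant simply chains the existing switch with a wait on percpu_ref_switch_waitq, so a teardown path can block until per-CPU counting has actually stopped. A sketch of a hypothetical caller (q is illustrative):

    /* Force atomic mode and wait out the switch; after this no other
     * thread may flip the ref back to percpu mode. */
    percpu_ref_switch_to_atomic_sync(&q->usage_counter);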
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 691a9ad48497..898e87998417 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2284,6 +2284,8 @@ static int radix_tree_cpu_dead(unsigned int cpu)
void __init radix_tree_init(void)
{
int ret;
+
+ BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
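The BUILD_BUG_ON encodes the layout constraint that root tags are stored in root->gfp_mask above __GFP_BITS_SHIFT, so the GFP bits plus one bit per tag must fit in 32 bits.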
diff --git a/lib/refcount.c b/lib/refcount.c
index aa09ad3c30b0..f42124ccf295 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,11 +37,29 @@
#include <linux/refcount.h>
#include <linux/bug.h>
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (!val)
return false;
@@ -51,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
new = val + i;
if (new < val)
new = UINT_MAX;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
- val = old;
- }
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
@@ -64,24 +78,45 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_add_not_zero);
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
void refcount_add(unsigned int i, refcount_t *r)
{
WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
*/
bool refcount_inc_not_zero(refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
new = val + 1;
if (!val)
@@ -90,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
if (unlikely(!new))
return true;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
@@ -103,11 +133,17 @@ bool refcount_inc_not_zero(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
*/
void refcount_inc(refcount_t *r)
{
@@ -115,11 +151,31 @@ void refcount_inc(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_inc);
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (unlikely(val == UINT_MAX))
return false;
@@ -129,24 +185,24 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return false;
}
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return !new;
}
EXPORT_SYMBOL_GPL(refcount_sub_and_test);
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_and_test(refcount_t *r)
{
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_dec_and_test);
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-
void refcount_dec(refcount_t *r)
{
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
* It can be used like a try-delete operator; this explicit case is provided
* and not cmpxchg in generic, because that would allow implementing unsafe
* operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_if_one(refcount_t *r)
{
- return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+ int val = 1;
+
+ return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL_GPL(refcount_dec_if_one);
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
* No atomic_t counterpart, it decrements unless the value is 1, in which case
* it will return false.
*
* Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
*/
bool refcount_dec_not_one(refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (unlikely(val == UINT_MAX))
return true;
@@ -208,24 +278,27 @@ bool refcount_dec_not_one(refcount_t *r)
return true;
}
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_not_one);
-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
* to decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ * otherwise
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
}
EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
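Every conversion in this file relies on the same property: atomic_try_cmpxchg_*() writes the value it actually observed back into val on failure, so the loop needs no separate reload of the old value. A runnable userspace model of refcount_add_not_zero() using the equivalent GCC builtin (illustrative only, not the kernel implementation):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int refs = 1;

    static bool add_not_zero(unsigned int i)
    {
        unsigned int new, val = __atomic_load_n(&refs, __ATOMIC_RELAXED);

        do {
            if (!val)
                return false;      /* refcount is 0: do not resurrect */
            new = val + i;
            if (new < val)
                new = UINT_MAX;    /* saturate instead of wrapping */
            /* on failure the builtin reloads the current value into
             * val, just like atomic_try_cmpxchg_relaxed() */
        } while (!__atomic_compare_exchange_n(&refs, &val, new, false,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
        return true;
    }

    int main(void)
    {
        printf("%d refs=%u\n", add_not_zero(1), refs);  /* 1 refs=2 */
        refs = 0;
        printf("%d refs=%u\n", add_not_zero(1), refs);  /* 0 refs=0 */
        return 0;
    }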
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f8635fd57442..a930e436db5d 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -535,7 +535,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
struct rhash_head *head;
int elasticity;
- elasticity = ht->elasticity;
+ elasticity = RHT_ELASTICITY;
pprev = rht_bucket_var(tbl, hash);
rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *list;
@@ -958,35 +958,20 @@ int rhashtable_init(struct rhashtable *ht,
if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size);
- if (params->max_size)
- ht->p.max_size = rounddown_pow_of_two(params->max_size);
+ /* Cap total entries at 2^31 to avoid nelems overflow. */
+ ht->max_elems = 1u << 31;
- if (params->insecure_max_entries)
- ht->p.insecure_max_entries =
- rounddown_pow_of_two(params->insecure_max_entries);
- else
- ht->p.insecure_max_entries = ht->p.max_size * 2;
+ if (params->max_size) {
+ ht->p.max_size = rounddown_pow_of_two(params->max_size);
+ if (ht->p.max_size < ht->max_elems / 2)
+ ht->max_elems = ht->p.max_size * 2;
+ }
- ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+ ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
if (params->nelem_hint)
size = rounded_hashtable_size(&ht->p);
- /* The maximum (not average) chain length grows with the
- * size of the hash table, at a rate of (log N)/(log log N).
- * The value of 16 is selected so that even if the hash
- * table grew to 2^32 you would not expect the maximum
- * chain length to exceed it unless we are under attack
- * (or extremely unlucky).
- *
- * As this limit is only to detect attacks, we don't need
- * to set it to a lower value as you'd need the chain
- * length to vastly exceed 16 to have any real effect
- * on the system.
- */
- if (!params->insecure_elasticity)
- ht->elasticity = 16;
-
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
else
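The cap bounds the atomic nelems counter: insertions stop at 2^31 entries by default, and a caller-supplied max_size below 2^30 tightens the limit to max_size * 2 (for example, max_size = 2^20 yields max_elems = 2^21), reproducing the old insecure_max_entries default without keeping it in struct rhashtable_params.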
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 60e800e0b5a0..80aa8d5463fa 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
-static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
- bool wrap)
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+ unsigned int hint, bool wrap)
{
unsigned int orig_hint = hint;
int nr;
while (1) {
- nr = find_next_zero_bit(&word->word, word->depth, hint);
- if (unlikely(nr >= word->depth)) {
+ nr = find_next_zero_bit(word, depth, hint);
+ if (unlikely(nr >= depth)) {
/*
* We started with an offset, and we didn't reset the
* offset to 0 in a failure case, so start from 0 to
@@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
return -1;
}
- if (!test_and_set_bit(nr, &word->word))
+ if (!test_and_set_bit(nr, word))
break;
hint = nr + 1;
- if (hint >= word->depth - 1)
+ if (hint >= depth - 1)
hint = 0;
}
@@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
index = SB_NR_TO_INDEX(sb, alloc_hint);
for (i = 0; i < sb->map_nr; i++) {
- nr = __sbitmap_get_word(&sb->map[index],
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ sb->map[index].depth,
SB_NR_TO_BIT(sb, alloc_hint),
!round_robin);
if (nr != -1) {
@@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
}
EXPORT_SYMBOL_GPL(sbitmap_get);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+ unsigned long shallow_depth)
+{
+ unsigned int i, index;
+ int nr = -1;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ min(sb->map[index].depth, shallow_depth),
+ SB_NR_TO_BIT(sb, alloc_hint), true);
+ if (nr != -1) {
+ nr += index << sb->shift;
+ break;
+ }
+
+ /* Jump to next index. */
+ index++;
+ alloc_hint = index << sb->shift;
+
+ if (index >= sb->map_nr) {
+ index = 0;
+ alloc_hint = 0;
+ }
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
unsigned int i;
@@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth)
+{
+ unsigned int hint, depth;
+ int nr;
+
+ hint = this_cpu_read(*sbq->alloc_hint);
+ depth = READ_ONCE(sbq->sb.depth);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? prandom_u32() % depth : 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+ nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sbq->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sbq->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
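sbitmap_get_shallow() is sbitmap_get() with each word's depth clamped to min(depth, shallow_depth), so only the low shallow_depth bits of every word are allocation candidates. A hypothetical caller, sketched:

    /* allow at most 16 bits per word for this class of allocation */
    int nr = sbitmap_get_shallow(&sb, alloc_hint, 16);
    if (nr < 0)
        return -EBUSY;   /* full below the shallow limit; caller may wait */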
diff --git a/lib/string.c b/lib/string.c
index 1a7d3fd52541..1c1fc9187b05 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(strncpy);
* @src: Where to copy the string from
* @size: size of destination buffer
*
- * Compatible with *BSD: the result is always a valid
+ * Compatible with ``*BSD``: the result is always a valid
* NUL-terminated string that fits in the buffer (unless,
* of course, the buffer size is zero). It does not pad
* out the result like strncpy() does.
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0362da0b66c3..889bc31785be 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -434,6 +434,41 @@ loop:
return 0;
}
+static int __bpf_fill_stxdw(struct bpf_test *self, int size)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
+ insn[1] = BPF_ST_MEM(size, R10, -40, 42);
+
+ for (i = 2; i < len - 2; i++)
+ insn[i] = BPF_STX_XADD(size, R10, R0, -40);
+
+ insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_stxw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_W);
+}
+
+static int bpf_fill_stxdw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_DW);
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -4303,6 +4338,41 @@ static struct bpf_test tests[] = {
{ { 0, 0x22 } },
},
{
+ "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R1, R10),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12 } },
+ },
+ {
+ "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxw,
+ },
+ {
"STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
@@ -4315,6 +4385,41 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x22 } },
},
+ {
+ "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R1, R10),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12 } },
+ },
+ {
+ "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxdw,
+ },
/* BPF_JMP | BPF_EXIT */
{
"JMP_EXIT",
@@ -4656,6 +4761,51 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ /* Mainly testing JIT + imm64 here. */
+ "JMP_JGE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 1a8d71a68531..4621db801b23 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,7 +31,6 @@
* their capability at compile-time, we just have to opt-out certain archs.
*/
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_AVR32) && \
!defined(CONFIG_BLACKFIN) && \
!defined(CONFIG_M32R) && \
!defined(CONFIG_M68K) && \
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..1b6010a3beb8
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,26 @@
+#include <linux/uaccess.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
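The asymmetry is deliberate: _copy_from_user() must zero whatever it failed to copy so a short read cannot hand stale kernel bytes to the caller, while _copy_to_user() just reports the residue. A runnable userspace model of the zeroing contract (fail_after stands in for a faulting user address; everything here is illustrative):

    #include <stdio.h>
    #include <string.h>

    static unsigned long copy_from_user_model(char *to, const char *from,
                                              unsigned long n,
                                              unsigned long fail_after)
    {
        unsigned long ok = n < fail_after ? n : fail_after;
        unsigned long res = n - ok;      /* bytes NOT copied */

        memcpy(to, from, ok);            /* the raw_copy_from_user() part */
        if (res)
            memset(to + (n - res), 0, res);
        return res;
    }

    int main(void)
    {
        char dst[8];

        memset(dst, 0xff, sizeof(dst));
        unsigned long left = copy_from_user_model(dst, "abcdefgh", 8, 5);
        /* left=3 and dst[5..7] are 0, not stale 0xff */
        printf("left=%lu tail=%d %d %d\n", left, dst[5], dst[6], dst[7]);
        return 0;
    }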
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e3bf4e0f10b5..176641cc549d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1954,13 +1954,13 @@ set_precision(struct printf_spec *spec, int prec)
* This function generally follows C99 vsnprintf, but has some
* extensions and a few limitations:
*
- * %n is unsupported
- * %p* is handled by pointer()
+ * - ``%n`` is unsupported
+ * - ``%p*`` is handled by pointer()
*
* See pointer() or Documentation/printk-formats.txt for more
* extensive description.
*
- * ** Please update the documentation in both places when making changes **
+ * **Please update the documentation in both places when making changes**
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing