Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/bitmap.c4
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/dynamic_debug.c50
-rw-r--r--lib/flex_proportions.c8
-rw-r--r--lib/genalloc.c49
-rw-r--r--lib/idr.c2
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c103
-rw-r--r--lib/percpu-refcount.c305
-rw-r--r--lib/percpu_counter.c20
-rw-r--r--lib/proportions.c10
-rw-r--r--lib/random32.c39
-rw-r--r--lib/rhashtable.c24
-rw-r--r--lib/test_bpf.c63
-rw-r--r--lib/vsprintf.c2
15 files changed, 464 insertions, 220 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a28590083622..3ac43f34437b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1672,7 +1672,8 @@ config TEST_BPF
against the BPF interpreter or BPF JIT compiler depending on the
current setting. This is in particular useful for BPF JIT compiler
development, but also to run regression tests against changes in
- the interpreter code.
+	  the interpreter code. It also enables test stubs for the eBPF maps and
+	  verifier used by the user space verifier test suite.
If unsure, say N.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1e031f2c9aba..cd250a2e14cb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -884,7 +884,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
- * using the the map { <n, m> | the n-th bit of @relmap is the
+ * using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
@@ -932,7 +932,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
*
* Further lets say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possitility of an empty @dst result:
+ * avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 98f2d7e91a91..add80cc02dbe 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1149,7 +1149,7 @@ static void check_unmap(struct dma_debug_entry *ref)
static void check_for_stack(struct device *dev, void *addr)
{
if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from "
"stack [addr=%p]\n", addr);
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c9afbe2c445a..31fe79e31ab8 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -537,10 +537,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
-int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
- int res;
struct va_format vaf;
char buf[PREFIX_SIZE];
@@ -552,21 +551,17 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- res = printk(KERN_DEBUG "%s%pV",
- dynamic_emit_prefix(descriptor, buf), &vaf);
+ printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
-int __dynamic_dev_dbg(struct _ddebug *descriptor,
+void __dynamic_dev_dbg(struct _ddebug *descriptor,
const struct device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -577,30 +572,27 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (!dev) {
- res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev, "%s%s %s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev), dev_name(dev),
- &vaf);
+ dev_printk_emit(7, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_dev_dbg);
#ifdef CONFIG_NET
-int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev, const char *fmt, ...)
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -613,23 +605,21 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
if (dev && dev->dev.parent) {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s%s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev->dev.parent),
- dev_name(dev->dev.parent),
- netdev_name(dev), netdev_reg_state(dev),
- &vaf);
+ dev_printk_emit(7, dev->dev.parent,
+ "%s%s %s %s%s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
- netdev_reg_state(dev), &vaf);
+ printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
- res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_netdev_dbg);
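
The three dynamic-debug emit helpers above now return void; the printk()/dev_printk_emit()
result was only ever propagated, never consumed, by the pr_debug()/dev_dbg()/netdev_dbg()
callers they back. A minimal caller-side sketch, assuming a driver built with
CONFIG_DYNAMIC_DEBUG; the function name and message strings are illustrative only.

	#include <linux/device.h>
	#include <linux/printk.h>

	/* Caller-side view: with CONFIG_DYNAMIC_DEBUG, pr_debug() and dev_dbg()
	 * expand to __dynamic_pr_debug()/__dynamic_dev_dbg().  Nothing here
	 * looks at a return value, which is what lets the helpers become void. */
	static void example_dump_state(struct device *dev)
	{
		pr_debug("entering %s\n", __func__);
		dev_dbg(dev, "state dump for %s\n", dev_name(dev));
	}
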
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index ebf3bac460b0..8f25652f40d4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -34,13 +34,13 @@
*/
#include <linux/flex_proportions.h>
-int fprop_global_init(struct fprop_global *p)
+int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
int err;
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
- err = percpu_counter_init(&p->events, 1);
+ err = percpu_counter_init(&p->events, 1, gfp);
if (err)
return err;
seqcount_init(&p->sequence);
@@ -168,11 +168,11 @@ void fprop_fraction_single(struct fprop_global *p,
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int fprop_local_init_percpu(struct fprop_local_percpu *pl)
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
int err;
- err = percpu_counter_init(&pl->events, 0);
+ err = percpu_counter_init(&pl->events, 0, gfp);
if (err)
return err;
pl->period = 0;
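
Both flex_proportions initializers now take a gfp_t and forward it to
percpu_counter_init(), so the percpu allocation happens in the caller's stated
context. A minimal sketch of a caller under the new signatures; the variable and
function names are hypothetical.

	#include <linux/flex_proportions.h>

	static struct fprop_global example_events;
	static struct fprop_local_percpu example_local;

	static int example_fprop_setup(gfp_t gfp)
	{
		int err;

		/* gfp ends up in percpu_counter_init() -> alloc_percpu_gfp() */
		err = fprop_global_init(&example_events, gfp);
		if (err)
			return err;

		err = fprop_local_init_percpu(&example_local, gfp);
		if (err)
			fprop_global_destroy(&example_events);
		return err;
	}
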
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 38d2db82228c..cce4dd68c40d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -403,6 +403,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
+ * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * @pool: the generic memory pool
+ * @start: start address
+ * @size: size of the region
+ *
+ * Check if the range of addresses falls within the specified pool. Returns
+ * true if the entire range is contained in the pool and false otherwise.
+ */
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+ size_t size)
+{
+ bool found = false;
+ unsigned long end = start + size;
+ struct gen_pool_chunk *chunk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
+ if (start >= chunk->start_addr && start <= chunk->end_addr) {
+ if (end <= chunk->end_addr) {
+ found = true;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
+
+/**
* gen_pool_avail - get available free space of the pool
* @pool: pool to get available free space
*
@@ -481,6 +510,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
EXPORT_SYMBOL(gen_pool_first_fit);
/**
+ * gen_pool_first_fit_order_align - find the first available region
+ * of memory matching the size requirement. The region will be aligned
+ * to the order of the size specified.
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start,
+ unsigned int nr, void *data)
+{
+ unsigned long align_mask = roundup_pow_of_two(nr) - 1;
+
+ return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_order_align);
+
+/**
* gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
* @map: The address to base the search on
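
genalloc gains addr_in_gen_pool() for range-containment checks and
gen_pool_first_fit_order_align() as an allocation algorithm that aligns each
allocation to the order of its size. A minimal sketch of how a user might wire
these up; the pool parameters and helper names are assumptions, not part of this
patch.

	#include <linux/genalloc.h>

	/* Hypothetical pool setup: with the first-fit-order-align algorithm,
	 * allocations come back aligned to roundup_pow_of_two(size). */
	static struct gen_pool *example_create_pool(void)
	{
		struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);

		if (pool)
			gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
		return pool;
	}

	/* True only if [addr, addr + size) lies entirely inside one chunk;
	 * a range spanning two chunks reports false. */
	static bool example_owns_range(struct gen_pool *pool, unsigned long addr,
				       size_t size)
	{
		return addr_in_gen_pool(pool, addr, size);
	}
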
diff --git a/lib/idr.c b/lib/idr.c
index 50be3fa9b657..e654aebd5f80 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -626,7 +626,7 @@ static void __idr_remove_all(struct idr *idp)
* idr_destroy().
*
* A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 8563081e8da3..a1c387f6afba 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,31 +19,21 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(t, x) \
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
-#define HAVE_OP(t, x) \
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
-
-#define NEED_IP(t, x) \
- do { \
- if (!HAVE_IP(t, x)) \
- goto input_overrun; \
- } while (0)
-
-#define NEED_OP(t, x) \
- do { \
- if (!HAVE_OP(t, x)) \
- goto output_overrun; \
- } while (0)
-
-#define TEST_LB(m_pos) \
- do { \
- if ((m_pos) < out) \
- goto lookbehind_overrun; \
- } while (0)
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 15 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -101,8 +98,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t, 0);
- NEED_IP(t, 3);
+ NEED_OP(t);
+ NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -115,7 +112,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2, 0);
+ NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -136,13 +133,20 @@ copy_literal_run:
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 31 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
+ NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -154,13 +158,20 @@ copy_literal_run:
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 7 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -174,7 +185,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t, 15))) {
+ if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -184,7 +195,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6, 0)) {
+ if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
@@ -192,7 +203,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t, 0);
+ NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -201,7 +212,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t, 0);
+ NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -214,15 +225,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t, 3);
- NEED_OP(t, 0);
+ NEED_IP(t + 3);
+ NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
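
The rewritten run-length handling above counts consecutive zero bytes and converts
that count to a length in one step, with MAX_255_COUNT rejecting streams whose
count would overflow a size_t. A small stand-alone sketch of the arithmetic,
outside kernel context and with hypothetical names:

	#include <stddef.h>
	#include <assert.h>

	#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

	/* Each zero byte in the stream contributes 255 to the length; the
	 * trailing non-zero byte and a small base (15, 31 or 7 above) are
	 * added last.  (zero_bytes << 8) - zero_bytes is zero_bytes * 255
	 * without a multiply, and the MAX_255_COUNT cap keeps the final sum
	 * from wrapping a size_t. */
	static size_t decode_length(size_t zero_bytes, unsigned char last,
				    size_t base)
	{
		assert(zero_bytes <= MAX_255_COUNT);	/* decoder returns LZO_E_ERROR */
		return (zero_bytes << 8) - zero_bytes + base + last;
	}
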
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index a89cf09a8268..6111bcb28376 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,6 +1,8 @@
#define pr_fmt(fmt) "%s: " fmt "\n", __func__
#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/percpu-refcount.h>
/*
@@ -11,8 +13,8 @@
* percpu counters will all sum to the correct value
*
 * (More precisely: because modular arithmetic is commutative the sum of all the
- * pcpu_count vars will be equal to what it would have been if all the gets and
- * puts were done to a single integer, even if some of the percpu integers
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
* overflow or underflow).
*
* The real trick to implementing percpu refcounts is shutdown. We can't detect
@@ -25,75 +27,64 @@
* works.
*
* Converting to non percpu mode is done with some RCUish stuff in
- * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
- * can't hit 0 before we've added up all the percpu refs.
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
*/
-#define PCPU_COUNT_BIAS (1U << 31)
+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
-static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
+
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
- return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ return (unsigned long __percpu *)
+ (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
* @release: function which will be called when refcount hits 0
+ * @flags: PERCPU_REF_INIT_* flags
+ * @gfp: allocation mask to use
*
- * Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_set(ref, 1).
+ * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
+ * refcount of 1; analogous to atomic_long_set(ref, 1).  See the
+ * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
*/
-int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
+ unsigned int flags, gfp_t gfp)
{
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
+ __alignof__(unsigned long));
+ unsigned long start_count = 0;
- ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
- if (!ref->pcpu_count_ptr)
+ ref->percpu_count_ptr = (unsigned long)
+ __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
+ if (!ref->percpu_count_ptr)
return -ENOMEM;
- ref->release = release;
- return 0;
-}
-EXPORT_SYMBOL_GPL(percpu_ref_init);
-
-/**
- * percpu_ref_reinit - re-initialize a percpu refcount
- * @ref: perpcu_ref to re-initialize
- *
- * Re-initialize @ref so that it's in the same state as when it finished
- * percpu_ref_init(). @ref must have been initialized successfully, killed
- * and reached 0 but not exited.
- *
- * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
- * this function is in progress.
- */
-void percpu_ref_reinit(struct percpu_ref *ref)
-{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- int cpu;
+ ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
- BUG_ON(!pcpu_count);
- WARN_ON(!percpu_ref_is_zero(ref));
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+ else
+ start_count += PERCPU_COUNT_BIAS;
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ if (flags & PERCPU_REF_INIT_DEAD)
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ else
+ start_count++;
- /*
- * Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
- * that the zeroing is visible to all percpu accesses which can see
- * the following PCPU_REF_DEAD clearing.
- */
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(pcpu_count, cpu) = 0;
+ atomic_long_set(&ref->count, start_count);
- smp_store_release(&ref->pcpu_count_ptr,
- ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ ref->release = release;
+ return 0;
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
* percpu_ref_exit - undo percpu_ref_init()
@@ -107,26 +98,39 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
- if (pcpu_count) {
- free_percpu(pcpu_count);
- ref->pcpu_count_ptr = PCPU_REF_DEAD;
+ if (percpu_count) {
+ free_percpu(percpu_count);
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
-static void percpu_ref_kill_rcu(struct rcu_head *rcu)
+static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
+{
+ struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+
+ ref->confirm_switch(ref);
+ ref->confirm_switch = NULL;
+ wake_up_all(&percpu_ref_switch_waitq);
+
+ /* drop ref from percpu_ref_switch_to_atomic() */
+ percpu_ref_put(ref);
+}
+
+static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- unsigned count = 0;
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ unsigned long count = 0;
int cpu;
for_each_possible_cpu(cpu)
- count += *per_cpu_ptr(pcpu_count, cpu);
+ count += *per_cpu_ptr(percpu_count, cpu);
- pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+ pr_debug("global %ld percpu %ld",
+ atomic_long_read(&ref->count), (long)count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
@@ -140,21 +144,137 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
* reaching 0 before we add the percpu counts. But doing it at the same
* time is equivalent and saves us atomic operations:
*/
+ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
+
+ WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+ "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
+ ref->release, atomic_long_read(&ref->count));
+
+ /* @ref is viewed as dead on all CPUs, send out switch confirmation */
+ percpu_ref_call_confirm_rcu(rcu);
+}
+
+static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
+{
+}
+
+static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that
+ * switching is in progress. Use noop one if unspecified.
+ */
+ WARN_ON_ONCE(ref->confirm_switch);
+ ref->confirm_switch =
+ confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ } else if (confirm_switch) {
+ /*
+ * Somebody already set ATOMIC. Switching may still be in
+ * progress. @confirm_switch must be invoked after the
+ * switching is complete and a full sched RCU grace period
+ * has passed. Wait synchronously for the previous
+ * switching and schedule @confirm_switch invocation.
+ */
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+ ref->confirm_switch = confirm_switch;
- atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+ }
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_switch is specified and @ref is already in
+ * the process of switching to atomic mode. In such cases, @confirm_switch
+ * will be invoked after the switching is complete.
+ *
+ * Due to the way percpu_ref is implemented, @confirm_switch will be called
+ * after at least one full sched RCU grace period has passed but this is an
+ * implementation detail and must not be depended upon.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ ref->force_atomic = true;
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+}
- WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
- atomic_read(&ref->count));
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ int cpu;
+
+ BUG_ON(!percpu_count);
- /* @ref is viewed as dead on all CPUs, send out kill confirmation */
- if (ref->confirm_kill)
- ref->confirm_kill(ref);
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+ return;
+
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+ atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
- * Now we're in single atomic_t mode with a consistent refcount, so it's
- * safe to drop our initial ref:
+ * Restore per-cpu operation. smp_store_release() is paired with
+ * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following __PERCPU_REF_ATOMIC clearing.
*/
- percpu_ref_put(ref);
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(percpu_count, cpu) = 0;
+
+ smp_store_release(&ref->percpu_count_ptr,
+ ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+}
+
+/**
+ * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
+ * @ref: percpu_ref to switch to percpu mode
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * To re-use an expired ref, use percpu_ref_reinit().
+ *
+ * Switch @ref to percpu mode. This function may be invoked concurrently
+ * with all the get/put operations and can safely be mixed with kill and
+ * reinit operations. This function reverses the sticky atomic state set
+ * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
+ * dying or dead, the actual switching takes place on the following
+ * percpu_ref_reinit().
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @ref is in the process of switching to atomic mode
+ * by percpu_ref_switch_to_atomic().
+ */
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ ref->force_atomic = false;
+
+ /* a dying or dead ref can't be switched to percpu mode w/o reinit */
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_percpu(ref);
}
/**
@@ -164,39 +284,48 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
*
* Equivalent to percpu_ref_kill() but also schedules kill confirmation if
* @confirm_kill is not NULL. @confirm_kill, which may not block, will be
- * called after @ref is seen as dead from all CPUs - all further
- * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
- * for more details.
+ * called after @ref is seen as dead from all CPUs at which point all
+ * further invocations of percpu_ref_tryget_live() will fail. See
+ * percpu_ref_tryget_live() for details.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_kill is specified and @ref is in the
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
*
- * Due to the way percpu_ref is implemented, @confirm_kill will be called
- * after at least one full RCU grace period has passed but this is an
- * implementation detail and callers must not depend on it.
+ * Due to the way percpu_ref is implemented, @confirm_kill will be called
+ * after at least one full sched RCU grace period has passed but this is an
+ * implementation detail and must not be depended upon.
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
- WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
- "percpu_ref_kill() called more than once!\n");
+ WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ "%s called more than once on %pf!", __func__, ref->release);
- ref->pcpu_count_ptr |= PCPU_REF_DEAD;
- ref->confirm_kill = confirm_kill;
-
- call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
-/*
- * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
- * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18
- * devel cycle. Do not use anywhere else.
+/**
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
+ *
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
+ * initialized successfully and reached 0 but not exited.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
*/
-void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
{
- WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
- "percpu_ref_kill() called more than once on %pf!",
- ref->release);
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
- ref->pcpu_count_ptr |= PCPU_REF_DEAD;
- synchronize_sched_expedited();
- percpu_ref_kill_rcu(&ref->rcu);
+ ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
+ percpu_ref_get(ref);
+ if (!ref->force_atomic)
+ __percpu_ref_switch_to_percpu(ref);
}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
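
percpu_ref_init() now takes PERCPU_REF_INIT_* flags and a gfp mask, and mode
switching is explicit via percpu_ref_switch_to_atomic()/percpu_ref_switch_to_percpu().
A minimal lifecycle sketch, assuming a simple embedding object; the names are
illustrative only, not part of this patch.

	#include <linux/kernel.h>
	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct example_ctx {
		struct percpu_ref ref;
	};

	static void example_release(struct percpu_ref *ref)
	{
		struct example_ctx *ctx = container_of(ref, struct example_ctx, ref);

		percpu_ref_exit(&ctx->ref);	/* frees the percpu counters */
		kfree(ctx);
	}

	static struct example_ctx *example_create(void)
	{
		struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		/* flags == 0: start in percpu mode with a refcount of 1 */
		if (ctx && percpu_ref_init(&ctx->ref, example_release, 0, GFP_KERNEL)) {
			kfree(ctx);
			return NULL;
		}
		return ctx;
	}

	static void example_destroy(struct example_ctx *ctx)
	{
		/* marks the ref dead, collapses it to atomic mode and drops
		 * the initial reference; example_release() runs at zero */
		percpu_ref_kill(&ctx->ref);
	}

A user that recycles the ref instead of freeing it would keep it allocated, wait
for it to reach zero and call percpu_ref_reinit().
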
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..48144cdae819 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,13 +112,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
+ unsigned long flags __maybe_unused;
+
raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
+ fbc->counters = alloc_percpu_gfp(s32, gfp);
if (!fbc->counters)
return -ENOMEM;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_add(&fbc->list, &percpu_counters);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
+ unsigned long flags __maybe_unused;
+
if (!fbc->counters)
return;
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_del(&fbc->list);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
- spin_lock(&percpu_counters_lock);
+ spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
*pcount = 0;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irq(&percpu_counters_lock);
#endif
return NOTIFY_OK;
}
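
percpu_counter_init() now carries a gfp_t through to alloc_percpu_gfp(), and the
global percpu_counters_lock is taken irq-safely so initialization can happen in
more constrained contexts. A minimal caller sketch with hypothetical names:

	#include <linux/percpu_counter.h>

	static struct percpu_counter example_nr_items;

	static int example_counter_setup(void)
	{
		/* gfp is forwarded to alloc_percpu_gfp() for the s32 counters */
		return percpu_counter_init(&example_nr_items, 0, GFP_KERNEL);
	}

	static void example_counter_teardown(void)
	{
		percpu_counter_destroy(&example_nr_items);
	}
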
diff --git a/lib/proportions.c b/lib/proportions.c
index 05df84801b56..6f724298f67a 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,7 +73,7 @@
#include <linux/proportions.h>
#include <linux/rcupdate.h>
-int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
{
int err;
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
pd->index = 0;
pd->pg[0].shift = shift;
mutex_init(&pd->mutex);
- err = percpu_counter_init(&pd->pg[0].events, 0);
+ err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
if (err)
goto out;
- err = percpu_counter_init(&pd->pg[1].events, 0);
+ err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
if (err)
percpu_counter_destroy(&pd->pg[0].events);
@@ -188,12 +188,12 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int prop_local_init_percpu(struct prop_local_percpu *pl)
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
{
raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
+ return percpu_counter_init(&pl->events, 0, gfp);
}
void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/random32.c b/lib/random32.c
index c9b6bf3afe0c..0bee183fa18f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -37,6 +37,7 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
@@ -96,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32);
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_bytes().
*/
-void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
- unsigned char *p = buf;
- int i;
-
- for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
- u32 random = prandom_u32_state(state);
- int j;
+ u8 *ptr = buf;
- for (j = 0; j < sizeof(u32); j++) {
- p[i + j] = random;
- random >>= BITS_PER_BYTE;
- }
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(prandom_u32_state(state), (u32 *) ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
}
- if (i < bytes) {
- u32 random = prandom_u32_state(state);
- for (; i < bytes; i++) {
- p[i] = random;
- random >>= BITS_PER_BYTE;
- }
+ if (bytes > 0) {
+ u32 rem = prandom_u32_state(state);
+ do {
+ *ptr++ = (u8) rem;
+ bytes--;
+ rem >>= BITS_PER_BYTE;
+ } while (bytes > 0);
}
}
EXPORT_SYMBOL(prandom_bytes_state);
@@ -126,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state);
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*/
-void prandom_bytes(void *buf, int bytes)
+void prandom_bytes(void *buf, size_t bytes)
{
struct rnd_state *state = &get_cpu_var(net_rand_state);
@@ -137,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes);
static void prandom_warmup(struct rnd_state *state)
{
- /* Calling RNG ten times to satify recurrence condition */
+ /* Calling RNG ten times to satisfy recurrence condition */
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
@@ -152,7 +149,7 @@ static void prandom_warmup(struct rnd_state *state)
static u32 __extract_hwseed(void)
{
- u32 val = 0;
+ unsigned int val = 0;
(void)(arch_get_random_seed_int(&val) ||
arch_get_random_int(&val));
@@ -228,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare)
prandom_seed(entropy);
/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + (prandom_u32() % 40);
+ expires = 40 + prandom_u32_max(40);
seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
add_timer(&seed_timer);
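
prandom_bytes_state() now fills whole 32-bit words with put_unaligned() and only
byte-fills the tail, takes a size_t length, and the reseed timer uses
prandom_u32_max() for its [40, 80) second interval. A small caller-side sketch,
with hypothetical names and a fixed seed chosen only for illustration:

	#include <linux/random.h>

	/* Lengths that are not a multiple of four exercise the byte-wise
	 * tail path; the bulk is written a u32 at a time. */
	static void example_fill(u8 *buf, size_t len)
	{
		struct rnd_state state;

		prandom_seed_state(&state, 42ULL);	/* reproducible stream */
		prandom_bytes_state(&state, buf, len);	/* len is now size_t */
	}

	/* prandom_u32_max(40) is uniform over [0, 40), giving [40, 80) here. */
	static unsigned long example_reseed_secs(void)
	{
		return 40 + prandom_u32_max(40);
	}
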
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7b36e4d40ed7..081be3ba9ea8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -54,7 +54,7 @@ static u32 __hashfn(const struct rhashtable *ht, const void *key,
/**
* rhashtable_hashfn - compute hash for key of given length
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @key: pointer to key
* @len: length of key
*
@@ -85,7 +85,7 @@ static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
/**
* rhashtable_obj_hashfn - compute hash for hashed object
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @ptr: pointer to hashed object
*
* Computes the hash value using the hash function `hashfn` respectively
@@ -297,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
ASSERT_RHT_MUTEX(ht);
- if (tbl->size <= HASH_MIN_SIZE)
+ if (ht->shift <= ht->p.min_shift)
return 0;
ntbl = bucket_table_alloc(tbl->size / 2, flags);
@@ -505,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
-static size_t rounded_hashtable_size(unsigned int nelem)
+static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
- return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+ return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ 1UL << params->min_shift);
}
/**
@@ -565,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
(!params->key_len && !params->obj_hashfn))
return -EINVAL;
+ params->min_shift = max_t(size_t, params->min_shift,
+ ilog2(HASH_MIN_SIZE));
+
if (params->nelem_hint)
- size = rounded_hashtable_size(params->nelem_hint);
+ size = rounded_hashtable_size(params);
tbl = bucket_table_alloc(size, GFP_KERNEL);
if (tbl == NULL)
@@ -588,13 +592,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
* rhashtable_destroy - destroy hash table
* @ht: the hash table to destroy
*
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
*/
void rhashtable_destroy(const struct rhashtable *ht)
{
- const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
- bucket_table_free(tbl);
+ bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
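
rhashtable_shrink() now bottoms out at the shift floor given by the new min_shift
parameter rather than the global HASH_MIN_SIZE, and rhashtable_init() clamps
min_shift to at least ilog2(HASH_MIN_SIZE). A sketch of parameters a user might
pass, assuming the min_shift field added on the header side of this series; the
embedding structure and values are hypothetical.

	#include <linux/rhashtable.h>
	#include <linux/jhash.h>

	struct example_item {
		u32			key;
		struct rhash_head	node;
	};

	/* min_shift keeps the table at or above 1 << 6 buckets even when
	 * shrink decisions would otherwise keep halving it. */
	static struct rhashtable_params example_ht_params = {
		.nelem_hint	= 1024,
		.head_offset	= offsetof(struct example_item, node),
		.key_offset	= offsetof(struct example_item, key),
		.key_len	= sizeof(u32),
		.min_shift	= 6,
		.hashfn		= jhash,
	};

rhashtable_init(&ht, &example_ht_params) then proceeds as before; note that the
function may raise min_shift in place before sizing the initial table.
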
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 89e0345733bd..23e070bcf72d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1342,6 +1342,44 @@ static struct bpf_test tests[] = {
{ { 0, -1 } }
},
{
+ "INT: shifts by register",
+ .u.insns_int = {
+ BPF_MOV64_IMM(R0, -1234),
+ BPF_MOV64_IMM(R1, 1),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R0, R2),
+ BPF_MOV32_IMM(R4, -1234),
+ BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_AND, R4, 63),
+ BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
+ BPF_MOV64_IMM(R3, 47),
+ BPF_ALU64_REG(BPF_ARSH, R0, R3),
+ BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 4),
+ BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 5),
+ BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
"INT: DIV + ABS",
.u.insns_int = {
BPF_ALU64_REG(BPF_MOV, R6, R1),
@@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = {
{ },
{ { 1, 0 } },
},
+ {
+ "load 64-bit immediate",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x567800001234LL),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_IMM(BPF_LSH, R3, 32),
+ BPF_ALU64_IMM(BPF_RSH, R3, 32),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
};
static struct net_device dev;
@@ -1798,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
break;
case INTERNAL:
- fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM;
@@ -1835,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration)
{
u64 start, finish;
- int ret, i;
+ int ret = 0, i;
start = ktime_to_us(ktime_get());
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6fe2c84eb055..ba3cd0a35640 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1937,7 +1937,7 @@ EXPORT_SYMBOL(sprintf);
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
- * is skiped.
+ * is skipped.
*
* The return value is the number of words(32bits) which would be generated for
* the given input.