Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                  6
-rw-r--r--   lib/Kconfig.kasan           11
-rw-r--r--   lib/Makefile                 7
-rw-r--r--   lib/bitmap.c                20
-rw-r--r--   lib/dim/dim.c                4
-rw-r--r--   lib/dim/net_dim.c           56
-rw-r--r--   lib/kfifo.c                  3
-rw-r--r--   lib/logic_pio.c             73
-rw-r--r--   lib/raid6/Makefile           2
-rw-r--r--   lib/sg_split.c              12
-rw-r--r--   lib/sort.c                  34
-rw-r--r--   lib/stackdepot.c             4
-rw-r--r--   lib/test_firmware.c         55
-rw-r--r--   lib/test_meminit.c           2
-rw-r--r--   lib/timerqueue.c            30
-rw-r--r--   lib/vdso/gettimeofday.c     79
16 files changed, 294 insertions, 104 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index f33d66fc0e86..4e6b1c3e4c98 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -631,6 +631,9 @@ config SBITMAP
config PARMAN
tristate "parman" if COMPILE_TEST
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
+
config STRING_SELFTEST
tristate "Test string functions"
@@ -653,6 +656,3 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
-
-config OBJAGG
- tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fafba1a923b..7fa97a8b5717 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -106,7 +106,6 @@ endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- default !(CLANG_VERSION < 90000)
depends on KASAN
help
The LLVM stack address sanitizer has a know problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
Disabling asan-stack makes it safe to run kernels build
with clang-8 with KASAN enabled, though it loses some of
the functionality.
- This feature is always disabled when compile-testing with clang-8
- or earlier to avoid cluttering the output in stack overflow
- warnings, but clang-8 users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc and later clang versions it is
- assumed to always be safe to use and enabled by default.
+ This feature is always disabled when compile-testing with clang
+ to avoid cluttering the output in stack overflow warnings,
+ but clang users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
+ to use and enabled by default.
config KASAN_STACK
int
diff --git a/lib/Makefile b/lib/Makefile
index 095601ce371d..c5892807e06f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,10 +21,6 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
ifdef CONFIG_AMD_MEM_ENCRYPT
KASAN_SANITIZE_string.o := n
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_string.o = -pg
-endif
-
CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
endif
@@ -279,7 +275,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index bbe2589e8497..f9e834841e94 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -59,6 +59,26 @@ int __bitmap_equal(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_equal);
+bool __bitmap_or_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2,
+ const unsigned long *bitmap3,
+ unsigned int bits)
+{
+ unsigned int k, lim = bits / BITS_PER_LONG;
+ unsigned long tmp;
+
+ for (k = 0; k < lim; ++k) {
+ if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
+ return false;
+ }
+
+ if (!(bits % BITS_PER_LONG))
+ return true;
+
+ tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
+ return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
+}
+
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
unsigned int k, lim = BITS_TO_LONGS(bits);
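For illustration only (not part of the patch): the new __bitmap_or_equal() answers "does bitmap1 OR'ed with bitmap2 equal bitmap3 over the first 'bits' bits?", ignoring any tail bits in the last word. Below is a minimal userspace sketch of the same check, with LAST_WORD_MASK standing in for the kernel's BITMAP_LAST_WORD_MASK().

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_WORD   (8 * sizeof(unsigned long))
/* Mirrors BITMAP_LAST_WORD_MASK(): mask of the valid bits in the last word. */
#define LAST_WORD_MASK(bits)    (~0UL >> (-(bits) & (BITS_PER_WORD - 1)))

static bool or_equal(const unsigned long *b1, const unsigned long *b2,
                     const unsigned long *b3, unsigned int bits)
{
        unsigned int k, lim = bits / BITS_PER_WORD;
        unsigned long tmp;

        for (k = 0; k < lim; k++)               /* full words first */
                if ((b1[k] | b2[k]) != b3[k])
                        return false;

        if (!(bits % BITS_PER_WORD))            /* no partial last word */
                return true;

        tmp = (b1[k] | b2[k]) ^ b3[k];          /* compare only the valid bits */
        return (tmp & LAST_WORD_MASK(bits)) == 0;
}

int main(void)
{
        unsigned long a[] = { 0x05UL }, b[] = { 0xa0UL }, c[] = { 0xa5UL };

        printf("%d\n", or_equal(a, b, c, 8));   /* prints 1: 0x05 | 0xa0 == 0xa5 */
        return 0;
}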
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 439d641ec796..38045d6d0538 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -74,8 +74,8 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
delta_us);
curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
if (curr_stats->epms != 0)
- curr_stats->cpe_ratio =
- (curr_stats->cpms * 100) / curr_stats->epms;
+ curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
+ curr_stats->cpms * 100, curr_stats->epms);
else
curr_stats->cpe_ratio = 0;
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 5bcc902c5388..a4db51c21266 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -5,6 +5,62 @@
#include <linux/dim.h>
+/*
+ * Net DIM profiles:
+ * There are different set of profiles for each CQ period mode.
+ * There are different set of profiles for RX/TX CQs.
+ * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
+ */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+#define NET_DIM_RX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_RX_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+#define NET_DIM_TX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+}
+
+#define NET_DIM_TX_CQE_PROFILES { \
+ {5, 128}, \
+ {8, 64}, \
+ {16, 32}, \
+ {32, 32}, \
+ {64, 32} \
+}
+
+static const struct dim_cq_moder
+rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_RX_EQE_PROFILES,
+ NET_DIM_RX_CQE_PROFILES,
+};
+
+static const struct dim_cq_moder
+tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_TX_EQE_PROFILES,
+ NET_DIM_TX_CQE_PROFILES,
+};
+
struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
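Illustrative only: the tables above are indexed first by CQ period mode (EQE vs CQE) and then by the moderation level the DIM algorithm has settled on; helpers such as net_dim_get_rx_moderation() return the selected entry. A self-contained userspace sketch of that lookup follows (the names and the defensive bounds clamping are illustrative, not taken from the kernel source).

#include <stdio.h>

struct cq_moder { unsigned int usec; unsigned int pkts; };

enum { MODE_EQE, MODE_CQE, NUM_MODES };
#define NUM_PROFILES 5

static const struct cq_moder rx_profile[NUM_MODES][NUM_PROFILES] = {
        [MODE_EQE] = { {1, 256}, {8, 256}, {64, 256}, {128, 256}, {256, 256} },
        [MODE_CQE] = { {2, 256}, {8, 128}, {16, 64}, {32, 64}, {64, 64} },
};

static struct cq_moder get_rx_moderation(int mode, int level)
{
        /* Clamping is purely defensive for this sketch. */
        if (level < 0)
                level = 0;
        if (level >= NUM_PROFILES)
                level = NUM_PROFILES - 1;
        return rx_profile[mode][level];
}

int main(void)
{
        struct cq_moder m = get_rx_moderation(MODE_CQE, 2);

        printf("usec=%u pkts=%u\n", m.usec, m.pkts);    /* usec=16 pkts=64 */
        return 0;
}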
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 117ad0e7fbf4..70dab9ac7827 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
{
size /= esize;
- size = roundup_pow_of_two(size);
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
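The change above matters because __kfifo_init() works on a caller-supplied buffer: rounding a non-power-of-two element count up would advertise more slots than the buffer actually holds, while rounding down stays within it. A small userspace sketch of the arithmetic (rounddown_pow_of_two() here is a stand-in for the kernel helper):

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int n)
{
        while (n & (n - 1))
                n &= n - 1;     /* clear the lowest set bit until one remains */
        return n;
}

int main(void)
{
        unsigned int buf_bytes = 100, esize = 4;
        unsigned int size = buf_bytes / esize;          /* 25 elements fit */

        /* Rounding up would have claimed 32 slots (128 bytes) from a
         * 100-byte buffer; rounding down keeps the fifo inside it. */
        if (size & (size - 1))                          /* not a power of two */
                size = rounddown_pow_of_two(size);      /* -> 16, still fits  */

        printf("fifo capacity: %u elements\n", size);
        return 0;
}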
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index feea48fd1a0d..905027574e5d 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
struct logic_pio_hwaddr *range;
resource_size_t start;
resource_size_t end;
- resource_size_t mmio_sz = 0;
+ resource_size_t mmio_end = 0;
resource_size_t iio_sz = MMIO_UPPER_LIMIT;
int ret = 0;
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
end = new_range->hw_start + new_range->size;
mutex_lock(&io_range_mutex);
- list_for_each_entry_rcu(range, &io_range_list, list) {
+ list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
/* for MMIO ranges we need to check for overlap */
if (start >= range->hw_start + range->size ||
end < range->hw_start) {
- mmio_sz += range->size;
+ mmio_end = range->io_start + range->size;
} else {
ret = -EFAULT;
goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
/* range not registered yet, check for available space */
if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
- if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+ if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
/* if it's too big check if 64K space can be reserved */
- if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+ if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
ret = -E2BIG;
goto end_register;
}
new_range->size = SZ_64K;
pr_warn("Requested IO range too big, new size set to 64K\n");
}
- new_range->io_start = mmio_sz;
+ new_range->io_start = mmio_end;
} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
ret = -E2BIG;
@@ -99,6 +99,20 @@ end_register:
}
/**
+ * logic_pio_unregister_range - unregister a logical PIO range for a host
+ * @range: pointer to the IO range which has been already registered.
+ *
+ * Unregister a previously-registered IO range node.
+ */
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
+{
+ mutex_lock(&io_range_mutex);
+ list_del_rcu(&range->list);
+ mutex_unlock(&io_range_mutex);
+ synchronize_rcu();
+}
+
+/**
* find_io_range_by_fwnode - find logical PIO range for given FW node
* @fwnode: FW node handle associated with logical PIO range
*
@@ -108,26 +122,38 @@ end_register:
*/
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
- struct logic_pio_hwaddr *range;
+ struct logic_pio_hwaddr *range, *found_range = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
- if (range->fwnode == fwnode)
- return range;
+ if (range->fwnode == fwnode) {
+ found_range = range;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+
+ return found_range;
}
/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
- struct logic_pio_hwaddr *range;
+ struct logic_pio_hwaddr *range, *found_range = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
- if (in_range(pio, range->io_start, range->size))
- return range;
+ if (in_range(pio, range->io_start, range->size)) {
+ found_range = range;
+ break;
+ }
}
- pr_err("PIO entry token %lx invalid\n", pio);
- return NULL;
+ rcu_read_unlock();
+
+ if (!found_range)
+ pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+ return found_range;
}
/**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
struct logic_pio_hwaddr *range;
+ rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
if (range->flags != LOGIC_PIO_CPU_MMIO)
continue;
- if (in_range(addr, range->hw_start, range->size))
- return addr - range->hw_start + range->io_start;
+ if (in_range(addr, range->hw_start, range->size)) {
+ unsigned long cpuaddr;
+
+ cpuaddr = addr - range->hw_start + range->io_start;
+
+ rcu_read_unlock();
+ return cpuaddr;
+ }
}
- pr_err("addr %llx not registered in io_range_list\n",
- (unsigned long long) addr);
+ rcu_read_unlock();
+
+ pr_err("addr %pa not registered in io_range_list\n", &addr);
+
return ~0UL;
}
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 42695bc8d451..0083b5cc646c 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -66,7 +66,7 @@ CFLAGS_vpermxor1.o += $(altivec_flags)
CFLAGS_vpermxor2.o += $(altivec_flags)
CFLAGS_vpermxor4.o += $(altivec_flags)
CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 9982c63d1063..60a0babebf2e 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -176,11 +176,13 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
* The order of these 3 calls is important and should be kept.
*/
sg_split_phys(splitters, nb_splits);
- ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
- split_sizes, splitters, true);
- if (ret < 0)
- goto err;
- sg_split_mapped(splitters, nb_splits);
+ if (in_mapped_nents) {
+ ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+ split_sizes, splitters, true);
+ if (ret < 0)
+ goto err;
+ sg_split_mapped(splitters, nb_splits);
+ }
for (i = 0; i < nb_splits; i++) {
out[i] = splitters[i].out_sg;
diff --git a/lib/sort.c b/lib/sort.c
index cf408aec3733..d54cf97e9548 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -144,6 +144,18 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
swap_func(a, b, (int)size);
}
+typedef int (*cmp_func_t)(const void *, const void *);
+typedef int (*cmp_r_func_t)(const void *, const void *, const void *);
+#define _CMP_WRAPPER ((cmp_r_func_t)0L)
+
+static int do_cmp(const void *a, const void *b,
+ cmp_r_func_t cmp, const void *priv)
+{
+ if (cmp == _CMP_WRAPPER)
+ return ((cmp_func_t)(priv))(a, b);
+ return cmp(a, b, priv);
+}
+
/**
* parent - given the offset of the child, find the offset of the parent.
* @i: the offset of the heap element whose parent is sought. Non-zero.
@@ -171,12 +183,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
}
/**
- * sort - sort an array of elements
+ * sort_r - sort an array of elements
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
* @cmp_func: pointer to comparison function
* @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
*
* This function does a heapsort on the given array. You may provide
* a swap_func function if you need to do something more than a memory
@@ -188,9 +201,10 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
* O(n*n) worst-case behavior and extra memory requirements that make
* it less suitable for kernel use.
*/
-void sort(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *),
- void (*swap_func)(void *, void *, int size))
+void sort_r(void *base, size_t num, size_t size,
+ int (*cmp_func)(const void *, const void *, const void *),
+ void (*swap_func)(void *, void *, int size),
+ const void *priv)
{
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
@@ -238,12 +252,12 @@ void sort(void *base, size_t num, size_t size,
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
- b = cmp_func(base + c, base + d) >= 0 ? c : d;
+ b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
/* Now backtrack from "b" to the correct location for "a" */
- while (b != a && cmp_func(base + a, base + b) >= 0)
+ while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
b = parent(b, lsbit, size);
c = b; /* Where "a" belongs */
while (b != a) { /* Shift it into place */
@@ -252,4 +266,12 @@ void sort(void *base, size_t num, size_t size,
}
}
}
+EXPORT_SYMBOL(sort_r);
+
+void sort(void *base, size_t num, size_t size,
+ int (*cmp_func)(const void *, const void *),
+ void (*swap_func)(void *, void *, int size))
+{
+ return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
+}
EXPORT_SYMBOL(sort);
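For illustration (not part of the patch): the _CMP_WRAPPER trick lets the old two-argument sort() ride on top of sort_r() by smuggling the legacy comparator through the priv pointer. Below is a standalone userspace sketch of the same dispatch, with a trivial insertion sort standing in for the kernel heapsort; names are illustrative.

#include <stdio.h>
#include <string.h>

typedef int (*cmp_func_t)(const void *, const void *);
typedef int (*cmp_r_func_t)(const void *, const void *, const void *);

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
        if (!cmp)                               /* wrapper marker: no r-comparator */
                return ((cmp_func_t)priv)(a, b);
        return cmp(a, b, priv);
}

/* Tiny insertion sort standing in for the kernel heapsort.
 * Assumes size <= sizeof(tmp). */
static void sort_r(void *base, size_t num, size_t size,
                   cmp_r_func_t cmp, const void *priv)
{
        char *b = base, tmp[64];

        for (size_t i = 1; i < num; i++) {
                size_t j = i;

                memcpy(tmp, b + i * size, size);
                while (j > 0 && do_cmp(b + (j - 1) * size, tmp, cmp, priv) > 0) {
                        memcpy(b + j * size, b + (j - 1) * size, size);
                        j--;
                }
                memcpy(b + j * size, tmp, size);
        }
}

/* Legacy interface implemented via the priv pointer, as in the patch. */
static void sort(void *base, size_t num, size_t size, cmp_func_t cmp)
{
        sort_r(base, num, size, NULL, (const void *)cmp);
}

static int cmp_int(const void *a, const void *b)
{
        return *(const int *)a - *(const int *)b;
}

int main(void)
{
        int v[] = { 3, 1, 2 };

        sort(v, 3, sizeof(v[0]), cmp_int);
        printf("%d %d %d\n", v[0], v[1], v[2]);         /* 1 2 3 */
        return 0;
}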
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 66cab785bea0..ed717dd08ff3 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -87,7 +87,7 @@ static bool init_stack_slab(void **prealloc)
stack_slabs[depot_index + 1] = *prealloc;
/*
* This smp_store_release pairs with smp_load_acquire() from
- * |next_slab_inited| above and in depot_save_stack().
+ * |next_slab_inited| above and in stack_depot_save().
*/
smp_store_release(&next_slab_inited, 1);
}
@@ -114,7 +114,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
depot_offset = 0;
/*
* smp_store_release() here pairs with smp_load_acquire() from
- * |next_slab_inited| in depot_save_stack() and
+ * |next_slab_inited| in stack_depot_save() and
* init_stack_slab().
*/
if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 83ea6c4e623c..251213c872b5 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
@@ -26,6 +27,7 @@
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
+#define TEST_FIRMWARE_BUF_SIZE SZ_1K
static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
@@ -45,6 +47,8 @@ struct test_batched_req {
* test_config - represents configuration for the test for different triggers
*
* @name: the name of the firmware file to look for
+ * @into_buf: when the into_buf is used if this is true
+ * request_firmware_into_buf() will be used instead.
* @sync_direct: when the sync trigger is used if this is true
* request_firmware_direct() will be used instead.
* @send_uevent: whether or not to send a uevent for async requests
@@ -83,6 +87,7 @@ struct test_batched_req {
*/
struct test_config {
char *name;
+ bool into_buf;
bool sync_direct;
bool send_uevent;
u8 num_requests;
@@ -176,6 +181,7 @@ static int __test_firmware_config_init(void)
test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
test_fw_config->send_uevent = true;
+ test_fw_config->into_buf = false;
test_fw_config->sync_direct = false;
test_fw_config->req_firmware = request_firmware;
test_fw_config->test_result = 0;
@@ -245,6 +251,9 @@ static ssize_t config_show(struct device *dev,
"FW_ACTION_HOTPLUG" :
"FW_ACTION_NOHOTPLUG");
len += scnprintf(buf+len, PAGE_SIZE - len,
+ "into_buf:\t\t%s\n",
+ test_fw_config->into_buf ? "true" : "false");
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"sync_direct:\t\t%s\n",
test_fw_config->sync_direct ? "true" : "false");
len += scnprintf(buf+len, PAGE_SIZE - len,
@@ -393,6 +402,23 @@ static ssize_t config_num_requests_show(struct device *dev,
}
static DEVICE_ATTR_RW(config_num_requests);
+static ssize_t config_into_buf_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf,
+ count,
+ &test_fw_config->into_buf);
+}
+
+static ssize_t config_into_buf_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->into_buf);
+}
+static DEVICE_ATTR_RW(config_into_buf);
+
static ssize_t config_sync_direct_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -522,7 +548,7 @@ static ssize_t trigger_async_request_store(struct device *dev,
rc = count;
} else {
pr_err("failed to async load firmware\n");
- rc = -ENODEV;
+ rc = -ENOMEM;
}
out:
@@ -585,7 +611,26 @@ static int test_fw_run_batch_request(void *data)
return -EINVAL;
}
- req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
+ if (test_fw_config->into_buf) {
+ void *test_buf;
+
+ test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
+ if (!test_buf)
+ return -ENOSPC;
+
+ req->rc = request_firmware_into_buf(&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ TEST_FIRMWARE_BUF_SIZE);
+ if (!req->fw)
+ kfree(test_buf);
+ } else {
+ req->rc = test_fw_config->req_firmware(&req->fw,
+ req->name,
+ req->dev);
+ }
+
if (req->rc) {
pr_info("#%u: batched sync load failed: %d\n",
req->idx, req->rc);
@@ -849,6 +894,7 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(config),
TEST_FW_DEV_ATTR(config_name),
TEST_FW_DEV_ATTR(config_num_requests),
+ TEST_FW_DEV_ATTR(config_into_buf),
TEST_FW_DEV_ATTR(config_sync_direct),
TEST_FW_DEV_ATTR(config_send_uevent),
TEST_FW_DEV_ATTR(config_read_fw_idx),
@@ -886,8 +932,11 @@ static int __init test_firmware_init(void)
return -ENOMEM;
rc = __test_firmware_config_init();
- if (rc)
+ if (rc) {
+ kfree(test_fw_config);
+ pr_err("could not init firmware test config: %d\n", rc);
return rc;
+ }
rc = misc_register(&test_fw_misc_device);
if (rc) {
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 62d19f270cad..9729f271d150 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
* Copy the buffer to check that it's not wiped on
* free().
*/
- buf_copy = kmalloc(size, GFP_KERNEL);
+ buf_copy = kmalloc(size, GFP_ATOMIC);
if (buf_copy)
memcpy(buf_copy, buf, size);
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index bc7e64df27df..c52710964593 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -26,9 +26,10 @@
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
- struct rb_node **p = &head->head.rb_node;
+ struct rb_node **p = &head->rb_root.rb_root.rb_node;
struct rb_node *parent = NULL;
- struct timerqueue_node *ptr;
+ struct timerqueue_node *ptr;
+ bool leftmost = true;
/* Make sure we don't add nodes that are already added */
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
@@ -36,19 +37,17 @@ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
while (*p) {
parent = *p;
ptr = rb_entry(parent, struct timerqueue_node, node);
- if (node->expires < ptr->expires)
+ if (node->expires < ptr->expires) {
p = &(*p)->rb_left;
- else
+ } else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->node, parent, p);
- rb_insert_color(&node->node, &head->head);
+ rb_insert_color_cached(&node->node, &head->rb_root, leftmost);
- if (!head->next || node->expires < head->next->expires) {
- head->next = node;
- return true;
- }
- return false;
+ return leftmost;
}
EXPORT_SYMBOL_GPL(timerqueue_add);
@@ -65,15 +64,10 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
- /* update next pointer */
- if (head->next == node) {
- struct rb_node *rbn = rb_next(&node->node);
-
- head->next = rb_entry_safe(rbn, struct timerqueue_node, node);
- }
- rb_erase(&node->node, &head->head);
+ rb_erase_cached(&node->node, &head->rb_root);
RB_CLEAR_NODE(&node->node);
- return head->next != NULL;
+
+ return !RB_EMPTY_ROOT(&head->rb_root.rb_root);
}
EXPORT_SYMBOL_GPL(timerqueue_del);
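Illustrative only: the patch switches timerqueue to a leftmost-cached rbtree, so the next-to-expire node is tracked during insertion instead of being recomputed on removal. The sketch below shows the same leftmost-tracking idea on a plain, unbalanced binary search tree in userspace, not the kernel rbtree API.

#include <stdio.h>
#include <stdlib.h>

struct node { long expires; struct node *left, *right; };
struct queue { struct node *root; struct node *earliest; };

/* Returns 1 when the new node became the next timer to expire. */
static int tq_add(struct queue *q, struct node *n)
{
        struct node **p = &q->root;
        int leftmost = 1;

        while (*p) {
                if (n->expires < (*p)->expires) {
                        p = &(*p)->left;
                } else {
                        p = &(*p)->right;
                        leftmost = 0;   /* went right at least once */
                }
        }
        *p = n;
        if (leftmost)
                q->earliest = n;        /* O(1) peek of the next expiry */
        return leftmost;
}

int main(void)
{
        struct queue q = { 0 };
        long vals[] = { 30, 10, 20 };

        for (int i = 0; i < 3; i++) {
                struct node *n = calloc(1, sizeof(*n));

                n->expires = vals[i];
                printf("add %ld -> new earliest? %d\n", vals[i], tq_add(&q, n));
        }
        printf("next expiry: %ld\n", q.earliest->expires);      /* 10 */
        return 0;
}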
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2d1c1f241fd9..e630e7ff57f1 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
ns = vdso_ts->nsec;
last = vd->cycle_last;
if (unlikely((s64)cycles < 0))
- return clock_gettime_fallback(clk, ts);
+ return -1;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
/*
* Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
} else if (msk & VDSO_RAW) {
return do_hres(&vd[CS_RAW], clock, ts);
}
+ return -1;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ int ret = __cvdso_clock_gettime_common(clock, ts);
-fallback:
- return clock_gettime_fallback(clock, ts);
+ if (unlikely(ret))
+ return clock_gettime_fallback(clock, ts);
+ return 0;
}
static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_gettime_common(clock, &ts);
- ret = __cvdso_clock_gettime(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_gettime32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_gettime_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
}
static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
const struct vdso_data *vd = __arch_get_vdso_data();
- u64 ns;
+ u64 hrtimer_res;
u32 msk;
- u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ u64 ns;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
+ hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
/*
* Convert the clockid to a bitmask and use it to check which
* clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
*/
ns = hrtimer_res;
} else {
- goto fallback;
+ return -1;
}
- if (res) {
- res->tv_sec = 0;
- res->tv_nsec = ns;
- }
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
return 0;
+}
+
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ int ret = __cvdso_clock_getres_common(clock, res);
-fallback:
- return clock_getres_fallback(clock, res);
+ if (unlikely(ret))
+ return clock_getres_fallback(clock, res);
+ return 0;
}
static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_getres_common(clock, &ts);
- ret = __cvdso_clock_getres(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_getres32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_getres_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
}
#endif /* VDSO_HAS_CLOCK_GETRES */
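Illustrative only: the refactor splits each operation into a *_common() helper that merely reports failure and thin public wrappers that choose their own fallback, which is what lets the time32 entry points call a native 32-bit fallback when VDSO_HAS_32BIT_FALLBACK is defined. A userspace sketch of that shape follows; the names and the "fast path" body are placeholders, not the vDSO code.

#include <stdio.h>
#include <time.h>

/* Stand-in for the vDSO fast path: return -1 when the clock is not handled. */
static int fast_clock_gettime_common(clockid_t clock, struct timespec *ts)
{
        if (clock != CLOCK_REALTIME && clock != CLOCK_MONOTONIC)
                return -1;
        /* A real implementation would read the vDSO data page here. */
        return clock_gettime(clock, ts) ? -1 : 0;
}

/* 64-bit entry point: falls back to the full syscall on failure. */
static int my_clock_gettime(clockid_t clock, struct timespec *ts)
{
        int ret = fast_clock_gettime_common(clock, ts);

        if (ret)
                return clock_gettime(clock, ts);
        return 0;
}

/* time32-style entry point: shares the common helper but owns its own
 * fallback and conversion, in the spirit of __cvdso_clock_gettime32(). */
struct my_timespec32 { int tv_sec; int tv_nsec; };

static int my_clock_gettime32(clockid_t clock, struct my_timespec32 *res)
{
        struct timespec ts;
        int ret = fast_clock_gettime_common(clock, &ts);

        if (ret)                        /* a 32-bit syscall could be used here */
                ret = clock_gettime(clock, &ts);
        if (!ret) {
                res->tv_sec = (int)ts.tv_sec;
                res->tv_nsec = (int)ts.tv_nsec;
        }
        return ret;
}

int main(void)
{
        struct timespec ts;
        struct my_timespec32 t32;

        if (!my_clock_gettime(CLOCK_REALTIME, &ts))
                printf("64-bit: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        if (!my_clock_gettime32(CLOCK_MONOTONIC, &t32))
                printf("32-bit: %d.%09d\n", t32.tv_sec, t32.tv_nsec);
        return 0;
}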