author     David S. Miller <davem@davemloft.net>  2019-02-20 00:34:07 -0800
committer  David S. Miller <davem@davemloft.net>  2019-02-20 00:34:07 -0800
commit     375ca548f7e3ac82acdd0959eddd1fa0e17c35cc (patch)
tree       5360dc427e4eff7123613419ee522b7fda831de0 /kernel
parent     ptp_qoriq: don't pass a large struct by value but instead pass it by reference (diff)
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two easily resolvable overlapping change conflicts, one in TCP and one in the eBPF verifier.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/stackmap.c        8
-rw-r--r--  kernel/events/core.c        16
-rw-r--r--  kernel/events/ring_buffer.c  2
-rw-r--r--  kernel/trace/trace.c         2
-rw-r--r--  kernel/trace/trace_kprobe.c 10
5 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d43b14535827..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
struct stack_map_irq_work *work;
work = container_of(entry, struct stack_map_irq_work, irq_work);
- up_read(work->sem);
+ up_read_non_owner(work->sem);
work->sem = NULL;
}
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
} else {
work->sem = &current->mm->mmap_sem;
irq_work_queue(&work->irq_work);
+ /*
+ * The irq_work will release the mmap_sem with
+ * up_read_non_owner(). The rwsem_release() is called
+ * here to release the lock from lockdep's perspective.
+ */
+ rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
}
}
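The two stackmap.c hunks belong together: the stack map code cannot drop mmap_sem directly on this path, so it defers the release to an irq_work, which later runs in a context that is not the lock owner. That is why the worker uses up_read_non_owner(), and why rwsem_release() is called at the hand-off point, so lockdep stops attributing the held lock to the acquiring task. A minimal sketch of the pattern, assembled from the two hunks above; the defer_up_read() wrapper is invented here for illustration, in the patch this code sits inline in stack_map_get_build_id_offset():

struct stack_map_irq_work {
	struct irq_work irq_work;
	struct rw_semaphore *sem;
};

/* Runs later from irq_work context, i.e. not from the task that
 * acquired the semaphore, so the non-owner variant must be used. */
static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	up_read_non_owner(work->sem);
	work->sem = NULL;
}

/* Hand-off point (simplified, hypothetical helper name). */
static void defer_up_read(struct stack_map_irq_work *work)
{
	work->sem = &current->mm->mmap_sem;
	irq_work_queue(&work->irq_work);
	/*
	 * Tell lockdep the lock is no longer held by this task; the
	 * semaphore itself is dropped in do_up_read().
	 */
	rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
}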
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
}
}
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+ return event->pmu->check_period(event, value);
+}
+
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
return -EINVAL;
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+
event_function_call(event, __perf_event_period, &value);
return 0;
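For context, the path above is reached from userspace through the PERF_EVENT_IOC_PERIOD ioctl; with this change a period the PMU cannot support is rejected with -EINVAL at that point instead of being accepted and misbehaving later. A rough userspace sketch, assuming perf_fd comes from a prior perf_event_open() call:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Update the sampling period of an already-open perf event fd. */
static int set_sample_period(int perf_fd, __u64 period)
{
	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period) < 0) {
		/* With the check above, an unsupported period yields EINVAL here. */
		fprintf(stderr, "PERF_EVENT_IOC_PERIOD: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}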
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
return 0;
}
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+ return 0;
+}
+
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
pmu->pmu_disable = perf_pmu_nop_void;
}
+ if (!pmu->check_period)
+ pmu->check_period = perf_event_nop_int;
+
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
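The core only supplies the fallback, perf_event_nop_int(), which accepts every value; the interesting checks live in the PMU drivers. A hypothetical driver-side hook, just to illustrate the contract (the identifiers and the minimum-period rule are invented, not taken from this patch): any non-zero return makes perf_event_period() fail with -EINVAL.

/* Hypothetical PMU driver. */
#define EXAMPLE_PMU_MIN_PERIOD	128

static int example_pmu_check_period(struct perf_event *event, u64 value)
{
	/* Reject sampling periods the hardware cannot program. */
	if (value && value < EXAMPLE_PMU_MIN_PERIOD)
		return -EINVAL;
	return 0;
}

static struct pmu example_pmu = {
	/* ...other callbacks... */
	.check_period	= example_pmu_check_period,
};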
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
size = sizeof(struct ring_buffer);
size += nr_pages * sizeof(void *);
- if (order_base_2(size) >= MAX_ORDER)
+ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
goto fail;
rb = kzalloc(size, GFP_KERNEL);
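Why PAGE_SHIFT enters the comparison: order_base_2(size) is an order in bytes, while MAX_ORDER is an order in pages. The old test therefore rejected any metadata allocation above roughly 2^(MAX_ORDER-1) bytes (about 1 KiB with MAX_ORDER = 11), even though kzalloc() can serve up to MAX_ORDER - 1 contiguous pages, i.e. 2^(PAGE_SHIFT + MAX_ORDER - 1) bytes. A quick userspace check of the boundary, assuming PAGE_SHIFT = 12 and MAX_ORDER = 11 (a common x86-64 configuration of that era):

#include <stdio.h>

/* ceil(log2(n)), mirroring the kernel's order_base_2() for n >= 1. */
static unsigned order_base_2(unsigned long long n)
{
	unsigned order = 0;

	while ((1ULL << order) < n)
		order++;
	return order;
}

int main(void)
{
	const unsigned PAGE_SHIFT = 12, MAX_ORDER = 11;	/* assumed config */
	unsigned long long size = 4ULL << 20;	/* 4 MiB: kmalloc's ceiling here */

	printf("old test rejects 4 MiB: %d\n", order_base_2(size) >= MAX_ORDER);		/* prints 1 */
	printf("new test rejects 4 MiB: %d\n", order_base_2(size) >= PAGE_SHIFT + MAX_ORDER);	/* prints 0 */
	return 0;
}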
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
const char tgid_space[] = " ";
const char space[] = " ";
+ print_event_info(buf, m);
+
seq_printf(m, "# %s _-----=> irqs-off\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d5fb09ebba8b..9eaf07f99212 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
- mm_segment_t old_fs;
int ret, len = 0;
u8 c;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- pagefault_disable();
-
do {
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+ ret = probe_mem_read(&c, (u8 *)addr + len, 1);
len++;
} while (c && ret == 0 && len < MAX_STRING_SIZE);
- pagefault_enable();
- set_fs(old_fs);
-
return (ret < 0) ? ret : len;
}