Diffstat (limited to 'kernel')
 kernel/events/core.c         | 11
 kernel/kcsan/debugfs.c       |  3
 kernel/locking/lockdep.c     |  4
 kernel/locking/mutex-debug.c |  4
 kernel/locking/mutex-debug.h |  2
 kernel/locking/mutex.c       | 18
 kernel/locking/mutex.h       |  4
 kernel/module.c              | 17
 kernel/ptrace.c              | 18
 kernel/resource.c            |  2
 kernel/sched/fair.c          |  2
 kernel/signal.c              | 59
 kernel/time/alarmtimer.c     |  2
 kernel/trace/trace.c         | 31
 kernel/watchdog.c            | 34
 15 files changed, 135 insertions(+), 76 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2e947a485898..6fee4a7e88d7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6389,8 +6389,6 @@ void perf_event_wakeup(struct perf_event *event)
static void perf_sigtrap(struct perf_event *event)
{
- struct kernel_siginfo info;
-
/*
* We'd expect this to only occur if the irq_work is delayed and either
* ctx->task or current has changed in the meantime. This can be the
@@ -6405,13 +6403,8 @@ static void perf_sigtrap(struct perf_event *event)
if (current->flags & PF_EXITING)
return;
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_code = TRAP_PERF;
- info.si_errno = event->attr.type;
- info.si_perf = event->attr.sig_data;
- info.si_addr = (void __user *)event->pending_addr;
- force_sig_info(&info);
+ force_sig_perf((void __user *)event->pending_addr,
+ event->attr.type, event->attr.sig_data);
}
static void perf_pending_event_disable(struct perf_event *event)
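Below is a minimal sketch of the userspace side this refactor serves: a SIGTRAP handler consuming the new si_perf_type/si_perf_data fields (filled by force_sig_perf(), see the kernel/signal.c hunk further down). It assumes uapi headers that define TRAP_PERF and the si_perf_* siginfo accessors introduced by this series, and it elides the perf_event_open() setup; treat it as illustrative, not a complete consumer.

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Assumes uapi definitions of TRAP_PERF and the si_perf_type /
     * si_perf_data accessors; not every libc exposes them. */
    static void trap_handler(int sig, siginfo_t *info, void *uc)
    {
            if (info->si_code == TRAP_PERF)
                    fprintf(stderr, "perf: type=%u data=%llu addr=%p\n",
                            info->si_perf_type,
                            (unsigned long long)info->si_perf_data,
                            info->si_addr);
    }

    int main(void)
    {
            struct sigaction sa = {
                    .sa_sigaction = trap_handler,
                    .sa_flags     = SA_SIGINFO,
            };
            sigaction(SIGTRAP, &sa, NULL);
            /* ... perf_event_open() with attr.sigtrap = 1 goes here ... */
            pause();
            return 0;
    }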
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index c1dd02f3be8b..e65de172ccf7 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -266,9 +266,10 @@ static const struct file_operations debugfs_ops =
.release = single_release
};
-static void __init kcsan_debugfs_init(void)
+static int __init kcsan_debugfs_init(void)
{
debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+ return 0;
}
late_initcall(kcsan_debugfs_init);
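The signature change matters because the initcall machinery stores and invokes these functions through a pointer typed int (*)(void); registering a void-returning function mismatches that type (the kind of thing clang's LTO/CFI checking rejects). A standalone stand-in for the indirect call, with nothing kernel-specific assumed:

    /* Initcalls are invoked through this pointer type, so the function
     * must return int (0 for success, -errno for failure). */
    typedef int (*initcall_t)(void);

    static int example_init(void)
    {
            /* create debugfs entries, etc.; report success */
            return 0;
    }

    int main(void)
    {
            initcall_t fn = example_init;  /* what late_initcall() records */
            return fn();  /* a void-returning fn here would be a type mismatch */
    }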
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 48d736aa03b2..7641bd407239 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5736,7 +5736,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- trace_lock_acquired(lock, ip);
+ trace_lock_contended(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
@@ -5754,7 +5754,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- trace_lock_contended(lock, ip);
+ trace_lock_acquired(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index a7276aaf2abc..db9301591e3f 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
task->blocked_on = waiter;
}
-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;
- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f45a4ec..53e631e1d76d 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112ce155..013e1b08a1bf 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}
+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ acquired:
__ww_mutex_check_waiters(lock, ww_ctx);
}
- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);
debug_mutex_free_waiter(&waiter);
@@ -1080,7 +1088,7 @@ skip_wait:
err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d3fa71..f0c710b1d192 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)
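A note on the mutex-debug change above: list_del_init() is list_del() plus INIT_LIST_HEAD(), and the unlink half now happens exactly once in __mutex_remove_waiter(), so the debug helper keeps only the re-initialization. A self-contained userspace re-implementation of the list semantics involved, with names mirroring the kernel's for readability:

    #include <assert.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h->prev = h;   /* empty list: self-referential */
    }

    static void list_del(struct list_head *e)
    {
            /* unlink only; the kernel also poisons e->next/e->prev */
            e->next->prev = e->prev;
            e->prev->next = e->next;
    }

    int main(void)
    {
            struct list_head head, waiter;

            INIT_LIST_HEAD(&head);
            /* enqueue waiter after head */
            waiter.next = head.next;
            waiter.prev = &head;
            head.next->prev = &waiter;
            head.next = &waiter;

            list_del(&waiter);        /* __mutex_remove_waiter() part */
            INIT_LIST_HEAD(&waiter);  /* debug_mutex_remove_waiter() part */
            assert(head.next == &head && waiter.next == &waiter);
            return 0;
    }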
diff --git a/kernel/module.c b/kernel/module.c
index b5dd92e35b02..7e78dfabca97 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2401,6 +2401,15 @@ static long get_offset(struct module *mod, unsigned int *size,
return ret;
}
+static bool module_init_layout_section(const char *sname)
+{
+#ifndef CONFIG_MODULE_UNLOAD
+ if (module_exit_section(sname))
+ return true;
+#endif
+ return module_init_section(sname);
+}
+
/*
* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
* might -- code, read-only data, read-write data, small data. Tally
@@ -2435,7 +2444,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || module_init_section(sname))
+ || module_init_layout_section(sname))
continue;
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
pr_debug("\t%s\n", sname);
@@ -2468,7 +2477,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || !module_init_section(sname))
+ || !module_init_layout_section(sname))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
| INIT_OFFSET_MASK);
@@ -2807,11 +2816,7 @@ void * __weak module_alloc(unsigned long size)
bool __weak module_init_section(const char *name)
{
-#ifndef CONFIG_MODULE_UNLOAD
- return strstarts(name, ".init") || module_exit_section(name);
-#else
return strstarts(name, ".init");
-#endif
}
bool __weak module_exit_section(const char *name)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 76f09456ec4b..2997ca600d18 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -170,6 +170,21 @@ void __ptrace_unlink(struct task_struct *child)
spin_unlock(&child->sighand->siglock);
}
+static bool looks_like_a_spurious_pid(struct task_struct *task)
+{
+ if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
+ return false;
+
+ if (task_pid_vnr(task) == task->ptrace_message)
+ return false;
+ /*
+ * The tracee changed its pid but the PTRACE_EVENT_EXEC event
+ * was not wait()'ed; most probably the debugger targets the old
+ * leader which was destroyed in de_thread().
+ */
+ return true;
+}
+
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
@@ -180,7 +195,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
return ret;
spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+ if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+ !__fatal_signal_pending(task)) {
task->state = __TASK_TRACED;
ret = true;
}
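For reference, the exit_code value that looks_like_a_spurious_pid() compares against encodes the ptrace event in the byte above the signal number. A tiny standalone check, with the constants written out literally to keep the sketch self-contained (PTRACE_EVENT_EXEC is 4 and SIGTRAP is 5 on common architectures):

    #include <stdio.h>

    #define SIGTRAP            5   /* value on common architectures */
    #define PTRACE_EVENT_EXEC  4   /* uapi ptrace event number */

    int main(void)
    {
            unsigned int exec_stop = (PTRACE_EVENT_EXEC << 8) | SIGTRAP;
            printf("exec-stop exit_code = 0x%x\n", exec_stop);  /* 0x405 */
            return 0;
    }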
diff --git a/kernel/resource.c b/kernel/resource.c
index 028a5ab18818..ca9f5198a01f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1805,7 +1805,7 @@ static struct resource *__request_free_mem_region(struct device *dev,
REGION_DISJOINT)
continue;
- if (!__request_region_locked(res, &iomem_resource, addr, size,
+ if (__request_region_locked(res, &iomem_resource, addr, size,
name, 0))
break;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 20aa234ffe04..3248e24a90b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6217,7 +6217,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
}
if (has_idle_core)
- set_idle_cores(this, false);
+ set_idle_cores(target, false);
if (sched_feat(SIS_PROP) && !has_idle_core) {
time = cpu_clock(this) - time;
diff --git a/kernel/signal.c b/kernel/signal.c
index 66e88649cf74..f7c6ffcbd044 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1236,6 +1236,7 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
case SIL_TIMER:
case SIL_POLL:
case SIL_FAULT:
+ case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
@@ -1804,6 +1805,21 @@ int force_sig_pkuerr(void __user *addr, u32 pkey)
}
#endif
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
+{
+ struct kernel_siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_PERF;
+ info.si_addr = addr;
+ info.si_perf_data = sig_data;
+ info.si_perf_type = type;
+
+ return force_sig_info(&info);
+}
+
/* For the crazy architectures that include trap information in
* the errno field, instead of an actual errno value.
*/
@@ -2564,6 +2580,7 @@ static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
case SIL_FAULT:
+ case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
@@ -3251,6 +3268,10 @@ enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
#endif
else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
layout = SIL_PERF_EVENT;
+#ifdef __ARCH_SI_TRAPNO
+ else if (layout == SIL_FAULT)
+ layout = SIL_FAULT_TRAPNO;
+#endif
}
else if (si_code <= NSIGPOLL)
layout = SIL_POLL;
@@ -3354,35 +3375,28 @@ void copy_siginfo_to_external32(struct compat_siginfo *to,
break;
case SIL_FAULT:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+ break;
+ case SIL_FAULT_TRAPNO:
+ to->si_addr = ptr_to_compat(from->si_addr);
to->si_trapno = from->si_trapno;
-#endif
break;
case SIL_FAULT_MCEERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_lower = ptr_to_compat(from->si_lower);
to->si_upper = ptr_to_compat(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_pkey = from->si_pkey;
break;
case SIL_PERF_EVENT:
to->si_addr = ptr_to_compat(from->si_addr);
- to->si_perf = from->si_perf;
+ to->si_perf_data = from->si_perf_data;
+ to->si_perf_type = from->si_perf_type;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
@@ -3438,35 +3452,28 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
break;
case SIL_FAULT:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+ break;
+ case SIL_FAULT_TRAPNO:
+ to->si_addr = compat_ptr(from->si_addr);
to->si_trapno = from->si_trapno;
-#endif
break;
case SIL_FAULT_MCEERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_lower = compat_ptr(from->si_lower);
to->si_upper = compat_ptr(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_pkey = from->si_pkey;
break;
case SIL_PERF_EVENT:
to->si_addr = compat_ptr(from->si_addr);
- to->si_perf = from->si_perf;
+ to->si_perf_data = from->si_perf_data;
+ to->si_perf_type = from->si_perf_type;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
@@ -4644,11 +4651,13 @@ static inline void siginfo_buildtime_checks(void)
/* sigfault */
CHECK_OFFSET(si_addr);
+ CHECK_OFFSET(si_trapno);
CHECK_OFFSET(si_addr_lsb);
CHECK_OFFSET(si_lower);
CHECK_OFFSET(si_upper);
CHECK_OFFSET(si_pkey);
- CHECK_OFFSET(si_perf);
+ CHECK_OFFSET(si_perf_data);
+ CHECK_OFFSET(si_perf_type);
/* sigpoll */
CHECK_OFFSET(si_band);
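CHECK_OFFSET() in the hunk above is a build-time guard that a given field sits at the same offset in the userspace siginfo_t and the kernel-internal kernel_siginfo_t; the new entries extend it to si_trapno and the split si_perf_data/si_perf_type. A rough standalone equivalent using C11 static assertions (the struct stand-ins are invented for the sketch):

    #include <assert.h>    /* static_assert (C11) */
    #include <stddef.h>    /* offsetof */

    /* Invented stand-ins for the two siginfo flavours being compared. */
    struct uapi_siginfo   { int si_signo, si_errno, si_code; void *si_addr; };
    struct kernel_siginfo { int si_signo, si_errno, si_code; void *si_addr; };

    #define CHECK_OFFSET(field) \
            static_assert(offsetof(struct uapi_siginfo, field) == \
                          offsetof(struct kernel_siginfo, field), \
                          "siginfo layouts diverged: " #field)

    CHECK_OFFSET(si_addr);   /* fails at compile time if layouts drift */

    int main(void) { return 0; }   /* the check happens at compile time */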
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index bea9d08b1698..5897828b9d7e 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
if (rtcdev)
return -EBUSY;
- if (!rtc->ops->set_alarm)
+ if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
return -1;
if (!device_may_wakeup(rtc->dev.parent))
return -1;
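On the alarmtimer change: the features bitmap is consulted with test_bit() because, after the RTC features rework, a driver can be registered with alarm callbacks while the hardware lacks (or has disabled) alarm support, so the ops pointer alone is no longer authoritative; that rationale is inferred from the RTC features series, not stated in this hunk. A userspace stand-in for the bitmap test, with an illustrative bit number:

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Simplified stand-in for the kernel's test_bit(). */
    static int test_bit(unsigned int nr, const unsigned long *addr)
    {
            return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
    }

    enum { RTC_FEATURE_ALARM = 0 };   /* illustrative bit number */

    int main(void)
    {
            unsigned long features[1] = { 1UL << RTC_FEATURE_ALARM };
            printf("alarm supported: %d\n",
                   test_bit(RTC_FEATURE_ALARM, features));
            return 0;
    }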
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 560e4c8d3825..a21ef9cd2aae 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3704,6 +3704,9 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
goto print;
while (*p) {
+ bool star = false;
+ int len = 0;
+
j = 0;
/* We only care about %s and variants */
@@ -3725,13 +3728,17 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
/* Need to test cases like %08.*s */
for (j = 1; p[i+j]; j++) {
if (isdigit(p[i+j]) ||
- p[i+j] == '*' ||
p[i+j] == '.')
continue;
+ if (p[i+j] == '*') {
+ star = true;
+ continue;
+ }
break;
}
if (p[i+j] == 's')
break;
+ star = false;
}
j = 0;
}
@@ -3744,6 +3751,9 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
iter->fmt[i] = '\0';
trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+ if (star)
+ len = va_arg(ap, int);
+
/* The ap now points to the string data of the %s */
str = va_arg(ap, const char *);
@@ -3762,8 +3772,18 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
int ret;
/* Try to safely read the string */
- ret = strncpy_from_kernel_nofault(iter->fmt, str,
- iter->fmt_size);
+ if (star) {
+ if (len + 1 > iter->fmt_size)
+ len = iter->fmt_size - 1;
+ if (len < 0)
+ len = 0;
+ ret = copy_from_kernel_nofault(iter->fmt, str, len);
+ iter->fmt[len] = 0;
+ star = false;
+ } else {
+ ret = strncpy_from_kernel_nofault(iter->fmt, str,
+ iter->fmt_size);
+ }
if (ret < 0)
trace_seq_printf(&iter->seq, "(0x%px)", str);
else
@@ -3775,7 +3795,10 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
strncpy(iter->fmt, p + i, j + 1);
iter->fmt[j+1] = '\0';
}
- trace_seq_printf(&iter->seq, iter->fmt, str);
+ if (star)
+ trace_seq_printf(&iter->seq, iter->fmt, len, str);
+ else
+ trace_seq_printf(&iter->seq, iter->fmt, str);
p += i + j + 1;
}
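The star handling above exists because a '*' width or precision in a printf-style format consumes its own int argument from the va_list ahead of the string pointer, so the tracer must pull that int off too (and can use it to bound the nofault copy). A standalone illustration of the calling convention:

    #include <stdio.h>

    int main(void)
    {
            /* Deliberately not NUL-terminated, like raw event data. */
            const char buf[4] = { 't', 'e', 's', 't' };
            int len = sizeof(buf);

            /* "%.*s" reads the int precision first, then the pointer;
             * printing stops after len bytes, so no terminator is needed. */
            printf("%.*s\n", len, buf);
            return 0;
    }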
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7c397907d0e9..92d3bcc5a5e0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -302,10 +302,10 @@ void touch_softlockup_watchdog_sync(void)
__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
-static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
+static int is_softlockup(unsigned long touch_ts,
+ unsigned long period_ts,
+ unsigned long now)
{
- unsigned long now = get_timestamp();
-
if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
/* Warn about unreasonable delays. */
if (time_after(now, period_ts + get_softlockup_thresh()))
@@ -353,8 +353,7 @@ static int softlockup_fn(void *data)
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
- unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
- unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
+ unsigned long touch_ts, period_ts, now;
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
@@ -377,11 +376,22 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
/*
+ * Read the current timestamp first. It might become invalid anytime
+ * when a virtual machine is stopped by the host or when the watchdog
+ * is touched from NMI.
+ */
+ now = get_timestamp();
+ /*
* If a virtual machine is stopped by the host it can look to
- * the watchdog like a soft lockup. Check to see if the host
- * stopped the vm before we process the timestamps.
+ * the watchdog like a soft lockup. This function touches the watchdog.
*/
kvm_check_and_clear_guest_paused();
+ /*
+ * The stored timestamp is comparable with @now only when not touched.
+ * It might get touched anytime from NMI. Make sure that is_softlockup()
+ * uses the same (valid) value.
+ */
+ period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
/* Reset the interval when touched by known problematic code. */
if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
@@ -398,13 +408,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
- /* check for a softlockup
- * This is done by making sure a high priority task is
- * being scheduled. The task touches the watchdog to
- * indicate it is getting cpu time. If it hasn't then
- * this is a good indication some task is hogging the cpu
- */
- duration = is_softlockup(touch_ts, period_ts);
+ /* Check for a softlockup. */
+ touch_ts = __this_cpu_read(watchdog_touch_ts);
+ duration = is_softlockup(touch_ts, period_ts, now);
if (unlikely(duration)) {
/*
* Prevent multiple soft-lockup reports if one cpu is already
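The READ_ONCE() in the hunk above guarantees that period_ts is loaded exactly once, after now has been sampled, so a concurrent touch from NMI cannot hand is_softlockup() a mix of stale and fresh values. As a userspace stand-in (for word-sized types the kernel macro is essentially a volatile access; __typeof__ is a GCC/clang extension):

    #include <stdio.h>

    /* Force a single, untorn load the compiler may not re-read. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    unsigned long watchdog_report_ts;   /* rewritten asynchronously */

    int main(void)
    {
            unsigned long period_ts = READ_ONCE(watchdog_report_ts);
            printf("sampled once: %lu\n", period_ts);
            return 0;
    }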