// SPDX-License-Identifier: GPL-2.0
/*
 * lib/smp_processor_id.c
 *
 * DEBUG_PREEMPT variant of smp_processor_id().
 */
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
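
/*
 * check_preemption_disabled() - warn when a raw CPU-id lookup is done
 * from preemptible context.
 *
 * Returns the current CPU number.  A report is only emitted when none
 * of the "the CPU cannot change under us" conditions hold: preemption
 * disabled, interrupts disabled, the task affine to exactly one CPU,
 * or the scheduler not yet running during early boot.
 */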
notrace static unsigned int check_preemption_disabled(const char *what1,
                                                       const char *what2)
{
        int this_cpu = raw_smp_processor_id();

        if (likely(preempt_count()))
                goto out;

        if (irqs_disabled())
                goto out;

        /*
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
        if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
                goto out;

        /*
         * It is valid to assume CPU-locality during early bootup:
         */
        if (system_state < SYSTEM_SCHEDULING)
                goto out;

        /*
         * Avoid recursion:
         */
        preempt_disable_notrace();

        if (!printk_ratelimit())
                goto out_enable;

        printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
                what1, what2, preempt_count() - 1, current->comm, current->pid);

        print_symbol("caller is %s\n", (long)__builtin_return_address(0));
        dump_stack();

out_enable:
        preempt_enable_no_resched_notrace();
out:
        return this_cpu;
}
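
/*
 * With CONFIG_DEBUG_PREEMPT enabled, <linux/smp.h> maps
 * smp_processor_id() to debug_smp_processor_id(), so every caller is
 * vetted by check_preemption_disabled() above.
 */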
notrace unsigned int debug_smp_processor_id(void)
{
        return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
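
/*
 * Called from the __this_cpu_*() accessors (<linux/percpu-defs.h>)
 * when CONFIG_DEBUG_PREEMPT is enabled, to verify that the caller has
 * preemption disabled.
 */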
notrace void __this_cpu_preempt_check(const char *op)
{
        check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
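
/*
 * Usage sketch (illustrative only; this_cpu_stats[] is a hypothetical
 * per-CPU array, not something defined in this file): code that needs
 * a stable CPU number in preemptible context should pin itself first,
 * e.g.
 *
 *      int cpu = get_cpu();            // disables preemption
 *      this_cpu_stats[cpu]++;          // hypothetical per-CPU data
 *      put_cpu();                      // re-enables preemption
 *
 * while raw_smp_processor_id() remains available when the result is
 * only a statistical hint and migration after the read does not
 * matter.
 */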