author    Uros Bizjak <ubizjak@gmail.com>    2025-03-04 18:34:36 +0100
committer Ingo Molnar <mingo@kernel.org>     2025-03-04 20:30:33 +0100
commit    6d536cad0d55e71442b6d65500f74eb85544269e
tree      537048a8b8f9f8006463dd4e541a6fb4bcfaa230
parent    x86/smp: Move this_cpu_off to percpu hot section
x86/percpu: Fix __per_cpu_hot_end marker
Make the __per_cpu_hot_end marker point to the end of the percpu cache
hot data, not to the end of the percpu cache hot section.

This fixes the CONFIG_MPENTIUM4 case, where X86_L1_CACHE_SHIFT is set
to 7 (128 bytes). Also update the assert message accordingly.

Reported-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Link: https://lore.kernel.org/r/20250304173455.89361-1-ubizjak@gmail.com
Closes: https://lore.kernel.org/lkml/Z8a-NVJs-pm5W-mG@gmail.com/
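For illustration, a worked sketch of why the old marker placement trips the
64-byte assert once the cache line grows to 128 bytes. The 40-byte figure for
the hot data is an assumed example size, not taken from an actual build:

    before the fix (marker placed after ALIGN):
        __per_cpu_hot_start = 0
        end of ..hot.* data = 40
        . = ALIGN(128)        -> location counter = 128
        __per_cpu_hot_end   = 128
        ASSERT(128 - 0 <= 64)    fails, although only 40 bytes of data exist

    after the fix (marker placed before ALIGN):
        __per_cpu_hot_start = 0
        __per_cpu_hot_end   = 40
        . = ALIGN(128)           padding no longer measured
        ASSERT(40 - 0 <= 64)     passes

With the default 64-byte cache line the two placements happen to agree, which
is why the problem only shows up when X86_L1_CACHE_SHIFT is larger than 6.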
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S      2
-rw-r--r--  include/asm-generic/vmlinux.lds.h  3
2 files changed, 2 insertions, 3 deletions
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 31f9102b107f..ccdc45e5b759 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -331,7 +331,7 @@ SECTIONS
}
PERCPU_SECTION(L1_CACHE_BYTES)
- ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot section too large")
+ ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot data too large")
RUNTIME_CONST_VARIABLES
RUNTIME_CONST(ptr, USER_PTR_MAX)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c4e8fac50254..4925441bc471 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1072,9 +1072,8 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
. = ALIGN(cacheline); \
__per_cpu_hot_start = .; \
*(SORT_BY_ALIGNMENT(.data..percpu..hot.*)) \
- __per_cpu_hot_pad = .; \
- . = ALIGN(cacheline); \
__per_cpu_hot_end = .; \
+ . = ALIGN(cacheline); \
*(.data..percpu..read_mostly) \
. = ALIGN(cacheline); \
*(.data..percpu) \
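As a standalone illustration of the ordering the patch settles on, here is a
minimal linker-script sketch, not the kernel's actual script; the names
__hot_start, __hot_end and the .data.hot input section are hypothetical:

/* The end marker is taken before the alignment padding, so the size
 * check measures only the data itself, independent of cache line size. */
SECTIONS
{
    .percpu.hot : {
        __hot_start = .;
        *(.data.hot)        /* hypothetical cache-hot input section */
        __hot_end = .;      /* end of the data, before padding      */
        . = ALIGN(128);     /* pad out to the cache line size       */
    }
}
ASSERT(__hot_end - __hot_start <= 64, "hot data too large")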