From a48e61de758c6b45f080fabc6fed3f4ed42598dc Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 1 Oct 2019 11:43:13 +0100
Subject: arm64: Mark functions using explicit register variables as '__always_inline'

As of ac7c3e4ff401 ("compiler: enable CONFIG_OPTIMIZE_INLINING forcibly"),
inline functions are no longer annotated with '__always_inline', which
allows the compiler to decide whether inlining is really a good idea or
not.

Although this is a great idea on paper, the reality is that AArch64 GCC
prior to 9.1 has been shown to get confused when creating an out-of-line
copy of a function passing explicit 'register' variables into an inline
assembly block:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91111

It's not clear whether this is specific to arm64 or not but, for now,
ensure that all of our functions using 'register' variables are marked
as '__always_inline' so that the old behaviour is effectively preserved.

Hopefully other architectures are luckier with their compilers.

Cc: Masahiro Yamada
Cc: Nicolas Saenz Julienne
Cc: Arnd Bergmann
Cc: Russell King
Cc: Catalin Marinas
Cc: Nick Desaulniers
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/atomic_lse.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index c6bd87d2915b..574808b9df4c 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -321,7 +321,8 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 }
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
-static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr,	\
+static __always_inline u##sz						\
+__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
 {									\
@@ -362,7 +363,8 @@ __CMPXCHG_CASE(x, , mb_, 64, al, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, cl...)					\
-static inline long __lse__cmpxchg_double##name(unsigned long old1,	\
+static __always_inline long						\
+__lse__cmpxchg_double##name(unsigned long old1,				\
					       unsigned long old2,	\
					       unsigned long new1,	\
					       unsigned long new2,	\
--
cgit v1.2.3-59-g8ed1b


From 37a5076098c17f40913c772b017ff8e48e449656 Mon Sep 17 00:00:00 2001
From: Vincenzo Frascino
Date: Thu, 3 Oct 2019 18:48:35 +0100
Subject: arm64: vdso: Remove stale files from old assembly implementation

Moving over to the generic C implementation of the vDSO inadvertently
left some stale files behind which are no longer used. Remove them.

Cc: Will Deacon
Cc: Catalin Marinas
Signed-off-by: Vincenzo Frascino
Acked-by: Catalin Marinas
Tested-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/vdso_datapage.h | 33 ---------------------------------
 arch/arm64/kernel/vdso/gettimeofday.S  |  0
 2 files changed, 33 deletions(-)
 delete mode 100644 arch/arm64/include/asm/vdso_datapage.h
 delete mode 100644 arch/arm64/kernel/vdso/gettimeofday.S

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
deleted file mode 100644
index 1f38bf330a6e..000000000000
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Limited
- */
-#ifndef __ASM_VDSO_DATAPAGE_H
-#define __ASM_VDSO_DATAPAGE_H
-
-#ifndef __ASSEMBLY__
-
-struct vdso_data {
-	__u64 cs_cycle_last;	/* Timebase at clocksource init */
-	__u64 raw_time_sec;	/* Raw time */
-	__u64 raw_time_nsec;
-	__u64 xtime_clock_sec;	/* Kernel time */
-	__u64 xtime_clock_nsec;
-	__u64 xtime_coarse_sec;	/* Coarse time */
-	__u64 xtime_coarse_nsec;
-	__u64 wtm_clock_sec;	/* Wall to monotonic time */
-	__u64 wtm_clock_nsec;
-	__u32 tb_seq_count;	/* Timebase sequence counter */
-	/* cs_* members must be adjacent and in this order (ldp accesses) */
-	__u32 cs_mono_mult;	/* NTP-adjusted clocksource multiplier */
-	__u32 cs_shift;		/* Clocksource shift (mono = raw) */
-	__u32 cs_raw_mult;	/* Raw clocksource multiplier */
-	__u32 tz_minuteswest;	/* Whacky timezone stuff */
-	__u32 tz_dsttime;
-	__u32 use_syscall;
-	__u32 hrtimer_res;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e69de29bb2d1..000000000000
--
cgit v1.2.3-59-g8ed1b


From 0df2c90eba60791148cee1823c0bf5fc66e3465c Mon Sep 17 00:00:00 2001
From: Vincenzo Frascino
Date: Thu, 3 Oct 2019 18:48:34 +0100
Subject: arm64: vdso32: Detect binutils support for dmb ishld

Older versions of binutils (prior to 2.24) do not support the "ISHLD"
option for memory barrier instructions, which leads to a build failure
when assembling the vdso32 library.

Add a compile-time mechanism that detects whether binutils supports
those instructions and configures the kernel accordingly.

Cc: Will Deacon
Cc: Catalin Marinas
Reported-by: Will Deacon
Signed-off-by: Vincenzo Frascino
Reviewed-by: Catalin Marinas
Tested-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/vdso/compat_barrier.h | 2 +-
 arch/arm64/kernel/vdso32/Makefile            | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h
index fb60a88b5ed4..3fd8fd6d8fc2 100644
--- a/arch/arm64/include/asm/vdso/compat_barrier.h
+++ b/arch/arm64/include/asm/vdso/compat_barrier.h
@@ -20,7 +20,7 @@
 
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 
-#if __LINUX_ARM_ARCH__ >= 8
+#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD)
 #define aarch32_smp_mb()	dmb(ish)
 #define aarch32_smp_rmb()	dmb(ishld)
 #define aarch32_smp_wmb()	dmb(ishst)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 19e0d3115ffe..77aa61340374 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -15,6 +15,8 @@ cc32-disable-warning = $(call try-run,\
 	$(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 cc32-ldoption = $(call try-run,\
 	$(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-as-instr = $(call try-run,\
+	printf "%b\n" "$(1)" | $(COMPATCC) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
 # We cannot use the global flags to compile the vDSO files, the main reason
 # being that the 32-bit compiler may be older than the main (64-bit) compiler
@@ -53,6 +55,7 @@ endif
 
 VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
 VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
+
 # Try to compile for ARMv8. If the compiler is too old and doesn't support it,
 # fall back to v7. There is no easy way to check for what architecture the code
 # is being compiled, so define a macro specifying that (see arch/arm/Makefile).
@@ -89,6 +92,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast
 VDSO_AFLAGS := $(VDSO_CAFLAGS)
 VDSO_AFLAGS += -D__ASSEMBLY__
 
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
 VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
 # From arm vDSO Makefile
 VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
--
cgit v1.2.3-59-g8ed1b
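
A minimal standalone sketch of the assembler-probe idea behind the
cc32-as-instr helper above, for experimenting outside the kernel build
system. The CC default, the as-instr name and the probe target are
illustrative assumptions rather than part of the patch, CC is assumed
to point at an ARM-targeting toolchain, and the probe is simplified to
use $(shell) with /dev/null instead of the kernel's try-run helper and
its temporary output file:

# Pipe an instruction into the assembler; echo the define only if it
# assembles cleanly, otherwise expand to an empty string.
CC ?= arm-linux-gnueabihf-gcc
as-instr = $(shell printf "%b\n" "$(1)" | $(CC) -c -x assembler -o /dev/null - 2>/dev/null && echo "$(2)")

# Mirrors the dmbinstr check added to the vdso32 Makefile.
dmbinstr := $(call as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)

probe:
	@echo "assembler probe result: '$(dmbinstr)'"

Running 'make probe' prints -DCONFIG_AS_DMB_ISHLD=1 when the assembler
accepts "dmb ishld" and an empty result otherwise, which is the same
behaviour the Makefile change relies on to gate the ISHLD barriers in
compat_barrier.h.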