From 5429ef62bcf360aae06740cbe065be01e5cfb6fc Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 22 Jan 2020 19:38:21 +0000
Subject: compiler/gcc: Raise minimum GCC version for kernel builds to 4.8

It is very rare to see versions of GCC prior to 4.8 being used to build
the mainline kernel. These old compilers are also known to have codegen
issues which can lead to silent miscompilation:

https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Raise the minimum GCC version for kernel builds to 4.8 and remove some
tautological Kconfig dependencies as a consequence.

Cc: Masahiro Yamada
Acked-by: Arnd Bergmann
Reviewed-by: Nick Desaulniers
Signed-off-by: Will Deacon
---
 arch/arm/crypto/Kconfig | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 2674de6ada1f..c9bf2df85cb9 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON
 
 config CRYPTO_SHA1_ARM_CE
 	tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+	depends on KERNEL_MODE_NEON
 	select CRYPTO_SHA1_ARM
 	select CRYPTO_HASH
 	help
@@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE
 
 config CRYPTO_SHA2_ARM_CE
 	tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
-	depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+	depends on KERNEL_MODE_NEON
 	select CRYPTO_SHA256_ARM
 	select CRYPTO_HASH
 	help
@@ -96,7 +96,7 @@ config CRYPTO_AES_ARM_BS
 
 config CRYPTO_AES_ARM_CE
 	tristate "Accelerated AES using ARMv8 Crypto Extensions"
-	depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+	depends on KERNEL_MODE_NEON
 	select CRYPTO_SKCIPHER
 	select CRYPTO_LIB_AES
 	select CRYPTO_SIMD
@@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE
 
 config CRYPTO_GHASH_ARM_CE
 	tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
-	depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+	depends on KERNEL_MODE_NEON
 	select CRYPTO_HASH
 	select CRYPTO_CRYPTD
 	select CRYPTO_GF128MUL
@@ -118,13 +118,13 @@ config CRYPTO_GHASH_ARM_CE
 
 config CRYPTO_CRCT10DIF_ARM_CE
 	tristate "CRCT10DIF digest algorithm using PMULL instructions"
-	depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+	depends on KERNEL_MODE_NEON
 	depends on CRC_T10DIF
 	select CRYPTO_HASH
 
 config CRYPTO_CRC32_ARM_CE
 	tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
-	depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+	depends on KERNEL_MODE_NEON
 	depends on CRC32
 	select CRYPTO_HASH
--
cgit v1.2.3-59-g8ed1b
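The arch-limited excerpt above shows only the Kconfig cleanup; the gate that
actually enforces the new minimum presumably lives in
include/linux/compiler-gcc.h. A minimal sketch of such a version gate,
assuming the usual GCC_VERSION encoding (major*10000 + minor*100 +
patchlevel, so GCC 4.8.0 becomes 40800):

    /*
     * Illustrative sketch only, not the verbatim hunk from this commit:
     * refuse to build with compilers below the supported minimum.
     */
    #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)

    #if GCC_VERSION < 40800
    # error Sorry, your compiler is too old - please upgrade it.
    #endif

With such a gate in place, a check like 'GCC_VERSION >= 40800' is always true
wherever the kernel compiles at all, which is why the Kconfig dependencies
above become tautological and can simply be dropped.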
From c6a771d932332568df9f46a3b53507c578e8c8e8 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 14 Apr 2020 22:22:47 +0100
Subject: arm64: csum: Disable KASAN for do_csum()

do_csum() over-reads the source buffer and therefore abuses
READ_ONCE_NOCHECK() to avoid tripping up KASAN. In preparation for
READ_ONCE_NOCHECK() becoming a macro, and therefore losing its
'__no_sanitize_address' annotation, just annotate do_csum() explicitly
and fall back to normal loads.

Cc: Mark Rutland
Cc: Robin Murphy
Signed-off-by: Will Deacon
---
 arch/arm64/lib/csum.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 60eccae2abad..78b87a64ca0a 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data)
 	return tmp + (tmp >> 64);
 }
 
-unsigned int do_csum(const unsigned char *buff, int len)
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
 {
 	unsigned int offset, shift, sum;
 	const u64 *ptr;
@@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
 	 * odd/even alignment, and means we can ignore it until the very end.
 	 */
 	shift = offset * 8;
-	data = READ_ONCE_NOCHECK(*ptr++);
+	data = *ptr++;
 #ifdef __LITTLE_ENDIAN
 	data = (data >> shift) << shift;
 #else
@@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len)
 	while (unlikely(len > 64)) {
 		__uint128_t tmp1, tmp2, tmp3, tmp4;
 
-		tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
-		tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
-		tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
-		tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+		tmp1 = *(__uint128_t *)ptr;
+		tmp2 = *(__uint128_t *)(ptr + 2);
+		tmp3 = *(__uint128_t *)(ptr + 4);
+		tmp4 = *(__uint128_t *)(ptr + 6);
 
 		len -= 64;
 		ptr += 8;
@@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
 		__uint128_t tmp;
 
 		sum64 = accumulate(sum64, data);
-		tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+		tmp = *(__uint128_t *)ptr;
 
 		len -= 16;
 		ptr += 2;
@@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
 	}
 	if (len > 0) {
 		sum64 = accumulate(sum64, data);
-		data = READ_ONCE_NOCHECK(*ptr);
+		data = *ptr;
 		len -= 8;
 	}
 	/*
--
cgit v1.2.3-59-g8ed1b
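For illustration, the same pattern in self-contained form (the helper below
is hypothetical, not kernel code): a word-at-a-time reader rounds down to an
aligned load and may touch bytes outside the caller's buffer, which the
address sanitizer would report; marking the function no_sanitize_address
trades away instrumentation of its body to silence that false positive.

    #include <stdint.h>

    /*
     * Hypothetical sketch: an aligned 8-byte load that can over-read
     * past the end of p's buffer (while staying within the same
     * aligned word). ASAN/KASAN would flag this, so instrumentation
     * is disabled for the whole function -- genuine bugs inside it go
     * unchecked too, which is the cost of the annotation.
     */
    __attribute__((no_sanitize_address))
    static uint64_t load_aligned_word(const unsigned char *p)
    {
            const uint64_t *wp = (const uint64_t *)((uintptr_t)p & ~(uintptr_t)7);
            return *wp;
    }

Dropping READ_ONCE_NOCHECK() in favour of plain loads is safe here because
do_csum() only needed the KASAN exemption, not the volatile access semantics.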
From 10223c5286f7389c022e9e91f12c49918790cf36 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 Dec 2019 17:11:43 +0000
Subject: arm64: barrier: Use '__unqual_scalar_typeof' for acquire/release
 macros

Passing volatile-qualified pointers to the arm64 implementations of the
load-acquire/store-release macros results in a re-load from the stack
and a bunch of associated stack-protector churn due to the temporary
result variable inheriting the volatile semantics thanks to the use of
'typeof()'.

Define these temporary variables using '__unqual_scalar_typeof' to drop
the volatile qualifier in the case that they are scalar types.

Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Arnd Bergmann
Acked-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/barrier.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 7d9cc5ec4971..fb4c27506ef4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
 #define __smp_store_release(p, v)					\
 do {									\
 	typeof(p) __p = (p);						\
-	union { typeof(*p) __val; char __c[1]; } __u =			\
-		{ .__val = (__force typeof(*p)) (v) };			\
+	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
+		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
 	compiletime_assert_atomic_type(*p);				\
 	kasan_check_write(__p, sizeof(*p));				\
 	switch (sizeof(*p)) {						\
@@ -110,7 +110,7 @@ do {									\
 
 #define __smp_load_acquire(p)						\
 ({									\
-	union { typeof(*p) __val; char __c[1]; } __u;			\
+	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
 	typeof(p) __p = (p);						\
 	compiletime_assert_atomic_type(*p);				\
 	kasan_check_read(__p, sizeof(*p));				\
@@ -136,33 +136,33 @@ do {									\
 		: "Q" (*__p) : "memory");				\
 		break;							\
 	}								\
-	__u.__val;							\
+	(typeof(*p))__u.__val;						\
 })
 
 #define smp_cond_load_relaxed(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
-	typeof(*ptr) VAL;						\
+	__unqual_scalar_typeof(*ptr) VAL;				\
 	for (;;) {							\
 		VAL = READ_ONCE(*__PTR);				\
 		if (cond_expr)						\
 			break;						\
 		__cmpwait_relaxed(__PTR, VAL);				\
 	}								\
-	VAL;								\
+	(typeof(*ptr))VAL;						\
 })
 
 #define smp_cond_load_acquire(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
-	typeof(*ptr) VAL;						\
+	__unqual_scalar_typeof(*ptr) VAL;				\
 	for (;;) {							\
 		VAL = smp_load_acquire(__PTR);				\
 		if (cond_expr)						\
 			break;						\
 		__cmpwait_relaxed(__PTR, VAL);				\
 	}								\
-	VAL;								\
+	(typeof(*ptr))VAL;						\
 })
 
 #include <asm-generic/barrier.h>
--
cgit v1.2.3-59-g8ed1b
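The trick behind '__unqual_scalar_typeof' can be sketched with C11 _Generic,
whose controlling expression undergoes lvalue conversion and so sheds
qualifiers such as 'volatile'. The kernel's full definition (in
include/linux/compiler_types.h) covers more scalar cases than this
simplified sketch, which assumes GCC/Clang _Generic semantics:

    /*
     * Simplified sketch: a scalar expression selects an unqualified
     * literal of the same type, so typeof() yields the plain type;
     * non-scalars hit 'default' and keep their qualified type.
     */
    #define __unqual_scalar_typeof(x) typeof(                   \
            _Generic((x),                                       \
                     char:               (char)0,               \
                     signed char:        (signed char)0,        \
                     unsigned char:      (unsigned char)0,      \
                     int:                (int)0,                \
                     unsigned int:       (unsigned int)0,       \
                     long:               (long)0,               \
                     unsigned long:      (unsigned long)0,      \
                     long long:          (long long)0,          \
                     unsigned long long: (unsigned long long)0, \
                     default:            (x)))

    volatile int flag;
    __unqual_scalar_typeof(flag) tmp;  /* 'int', not 'volatile int' */

Because VAL and __u.__val are then plain scalars, the compiler may keep them
in registers instead of spilling and re-loading them as volatile would
require, while the '(typeof(*ptr))' casts on the results restore the
original type for callers.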