From 5e5e51422cd189bc1b627f619f0f99324e6e4de9 Mon Sep 17 00:00:00 2001
From: Adrian Hunter <adrian.hunter@intel.com>
Date: Mon, 25 Mar 2024 08:40:08 +0200
Subject: math64: Tidy up mul_u64_u32_shr()

Put together declaration and initialization of local variables.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-5-adrian.hunter@intel.com
---
 include/linux/math64.h | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'include/linux/math64.h')

diff --git a/include/linux/math64.h b/include/linux/math64.h
index bf74478926d4..fd13622b2056 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -179,16 +179,12 @@ static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
 #ifndef mul_u64_u32_shr
 static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 {
-        u32 ah, al;
+        u32 ah = a >> 32, al = a;
         u64 ret;
 
-        al = a;
-        ah = a >> 32;
-
         ret = mul_u32_u32(al, mul) >> shift;
         if (ah)
                 ret += mul_u32_u32(ah, mul) << (32 - shift);
-
         return ret;
 }
 #endif /* mul_u64_u32_shr */
--
cgit v1.2.3-59-g8ed1b


From 1beb35ec615f676d49d68b6dc23c7418ba8ff145 Mon Sep 17 00:00:00 2001
From: Adrian Hunter <adrian.hunter@intel.com>
Date: Mon, 25 Mar 2024 08:40:09 +0200
Subject: vdso, math64: Provide mul_u64_u32_add_u64_shr()

Provide mul_u64_u32_add_u64_shr() which is a calculation that will be
used by timekeeping and VDSO.

Place #include <vdso/math64.h> after #include <asm/div64.h> to allow
architecture-specific overrides, at least for the kernel.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-6-adrian.hunter@intel.com
---
 include/linux/math64.h |  2 +-
 include/vdso/math64.h  | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

(limited to 'include/linux/math64.h')

diff --git a/include/linux/math64.h b/include/linux/math64.h
index fd13622b2056..d34def7f9a8c 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 #include <linux/math.h>
-#include <vdso/math64.h>
 #include <asm/div64.h>
+#include <vdso/math64.h>
 #include <vdso/time64.h>
 
 #if BITS_PER_LONG == 64
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
index 7da703ee5561..22ae212f8b28 100644
--- a/include/vdso/math64.h
+++ b/include/vdso/math64.h
@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
         return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+        return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+        return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+        u32 ah = a >> 32, al = a;
+        bool ovf;
+        u64 ret;
+
+        ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+        ret >>= shift;
+        if (ovf && shift)
+                ret += 1ULL << (64 - shift);
+        if (ah)
+                ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+        return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
 #endif /* __VDSO_MATH64_H */
--
cgit v1.2.3-59-g8ed1b
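
A note on the non-__int128 fallback added to include/vdso/math64.h: with a split
a = (ah << 32) + al and shift <= 32 (the same implicit constraint the existing
mul_u64_u32_shr() has), (a * mul + b) >> shift decomposes exactly into
((al * mul + b) >> shift) + ((ah * mul) << (32 - shift)), and a carry out of the
64-bit low sum contributes exactly 1 << (64 - shift) after the shift, which is
what the __builtin_add_overflow() correction restores. The following is a minimal
userspace sketch, not part of the patch, that checks the fallback against the
128-bit path; it substitutes <stdint.h> types for the kernel's u32/u64 and assumes
a GCC/Clang 64-bit target for unsigned __int128 and __builtin_add_overflow().

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
        return (uint64_t)a * b;
}

/* 128-bit reference: (a * mul + b) >> shift */
static inline uint64_t ref_shr(uint64_t a, uint32_t mul, uint64_t b, unsigned int shift)
{
        return (uint64_t)((((unsigned __int128)a * mul) + b) >> shift);
}

/* 32-bit fallback, as added to include/vdso/math64.h */
static inline uint64_t fallback_shr(uint64_t a, uint32_t mul, uint64_t b, unsigned int shift)
{
        uint32_t ah = a >> 32, al = a;
        bool ovf;
        uint64_t ret;

        /* Low 32x32 product plus the addend; ovf records the lost 2^64 carry */
        ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
        ret >>= shift;
        /* A carry of 2^64 contributes 2^(64 - shift) after the shift */
        if (ovf && shift)
                ret += 1ULL << (64 - shift);
        /* The high product is pre-scaled by 2^32, hence the (32 - shift) */
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}

int main(void)
{
        /* Inputs chosen so shift <= 32 and the true result fits in 64 bits */
        const struct { uint64_t a; uint32_t mul; uint64_t b; unsigned int shift; } t[] = {
                { 0xffffffffffffULL, 0x01406f40, 0x123456789abcULL, 24 },
                { 0xdeadbeefcafeULL, 0xffffffff, 0xffffffffffffffffULL, 32 }, /* exercises the carry path */
                { 0x1234ULL, 7, 9, 1 },
        };

        for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++)
                assert(fallback_shr(t[i].a, t[i].mul, t[i].b, t[i].shift) ==
                       ref_shr(t[i].a, t[i].mul, t[i].b, t[i].shift));

        puts("fallback matches 128-bit reference");
        return 0;
}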
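
The changelog says the helper is aimed at timekeeping and the VDSO. For
illustration only, the conversion such callers perform has the shape
ns = (cycles * mult + fractional_ns) >> shift, which maps onto the new helper
one-to-one; the struct and function names below are hypothetical sketches, not
taken from the kernel.

#include <linux/math64.h>       /* provides mul_u64_u32_add_u64_shr() after this series */

/* Hypothetical parameters of a cycles-to-nanoseconds conversion */
struct clock_params {
        u32 mult;       /* cycles -> shifted-nanoseconds multiplier */
        u32 shift;      /* fixed-point shift applied to mult */
        u64 frac_ns;    /* accumulated fractional nanoseconds, scaled by 2^shift */
};

static inline u64 cycles_to_ns(const struct clock_params *p, u64 delta)
{
        /* ns = (delta * mult + frac_ns) >> shift, computed in one helper */
        return mul_u64_u32_add_u64_shr(delta, p->mult, p->frac_ns, p->shift);
}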