Diffstat (limited to 'include/linux/math64.h')
-rw-r--r--  include/linux/math64.h | 98
1 file changed, 84 insertions, 14 deletions
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 66deb1fdc2ef..6aaccc1626ab 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -3,8 +3,9 @@
 #define _LINUX_MATH64_H
 
 #include <linux/types.h>
-#include <vdso/math64.h>
+#include <linux/math.h>
 #include <asm/div64.h>
+#include <vdso/math64.h>
 
 #if BITS_PER_LONG == 64
 
@@ -28,7 +29,7 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 	return dividend / divisor;
 }
 
-/*
+/**
  * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
  * @dividend: signed 64bit dividend
  * @divisor: signed 32bit divisor
@@ -42,7 +43,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 	return dividend / divisor;
 }
 
-/*
+/**
  * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
  * @dividend: unsigned 64bit dividend
  * @divisor: unsigned 64bit divisor
@@ -56,7 +57,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 	return dividend / divisor;
 }
 
-/*
+/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend: unsigned 64bit dividend
  * @divisor: unsigned 64bit divisor
@@ -68,7 +69,7 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
 	return dividend / divisor;
 }
 
-/*
+/**
  * div64_s64 - signed 64bit divide with 64bit divisor
  * @dividend: signed 64bit dividend
  * @divisor: signed 64bit divisor
@@ -119,6 +120,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
  * This is the most common 64bit divide and should be used if possible,
  * as many 32bit archs can optimize this variant better than a full 64bit
  * divide.
+ *
+ * Return: dividend / divisor
  */
 #ifndef div_u64
 static inline u64 div_u64(u64 dividend, u32 divisor)
@@ -132,6 +135,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
  * div_s64 - signed 64bit divide with 32bit divisor
  * @dividend: signed 64bit dividend
  * @divisor: signed 32bit divisor
+ *
+ * Return: dividend / divisor
  */
 #ifndef div_s64
 static inline s64 div_s64(s64 dividend, s32 divisor)
@@ -156,14 +161,14 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 {
 	return (u64)(((unsigned __int128)a * mul) >> shift);
 }
 #endif /* mul_u64_u32_shr */
 
 #ifndef mul_u64_u64_shr
-static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
 {
 	return (u64)(((unsigned __int128)a * mul) >> shift);
 }
@@ -172,18 +177,14 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
 #else
 
 #ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 {
-	u32 ah, al;
+	u32 ah = a >> 32, al = a;
 	u64 ret;
 
-	al = a;
-	ah = a >> 32;
-
 	ret = mul_u32_u32(al, mul) >> shift;
 	if (ah)
 		ret += mul_u32_u32(ah, mul) << (32 - shift);
-
 	return ret;
 }
 #endif /* mul_u64_u32_shr */
@@ -234,6 +235,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
 
 #endif
 
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+	u64 ret;
+
+	/*
+	 * Extract the sign before the multiplication and put it back
+	 * afterwards if needed.
+	 */
+	ret = mul_u64_u64_shr(abs(a), b, shift);
+
+	if (a < 0)
+		ret = -((s64) ret);
+
+	return ret;
+}
+#endif /* mul_s64_u64_shr */
+
 #ifndef mul_u64_u32_div
 static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 {
@@ -265,10 +284,33 @@
 u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
 
+/**
+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
 #define DIV64_U64_ROUND_UP(ll, d)	\
 	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
 
 /**
+ * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV_U64_ROUND_UP(ll, d)	\
+	({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
+
+/**
  * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
  * @dividend: unsigned 64bit dividend
  * @divisor: unsigned 64bit divisor
  *
@@ -281,7 +323,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
 #define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
 	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
 
-/*
+/**
+ * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_U64_ROUND_CLOSEST(dividend, divisor)	\
+	({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
+
+/**
  * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
  * @dividend: signed 64bit dividend
  * @divisor: signed 32bit divisor
@@ -300,4 +355,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
 		div_s64((__x - (__d / 2)), __d);	\
 }							\
 )
+
+/**
+ * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
+ * @x: the value to up
+ * @y: 32bit multiple to round up to
+ *
+ * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and
+ * the faster round_up() for powers of 2.
+ *
+ * Return: rounded up value.
+ */
+static inline u64 roundup_u64(u64 x, u32 y)
+{
+	return DIV_U64_ROUND_UP(x, y) * y;
+}
 #endif /* _LINUX_MATH64_H */
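The new mul_s64_u64_shr() works by stripping the sign, doing the unsigned multiply-shift on the magnitude, and negating the result when the signed operand was negative. Below is a minimal userspace sketch of that idea, not kernel code: llabs() and __int128 stand in for the kernel's abs() and mul_u64_u64_shr(), and a GCC/Clang 64-bit target is assumed.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical userspace stand-in for the kernel's mul_s64_u64_shr(). */
static uint64_t mul_s64_u64_shr(int64_t a, uint64_t b, unsigned int shift)
{
	/* Unsigned multiply-shift on the magnitude of the signed operand. */
	uint64_t ret = (uint64_t)(((unsigned __int128)llabs(a) * b) >> shift);

	/* Put the sign back afterwards if the signed operand was negative. */
	if (a < 0)
		ret = -ret;
	return ret;
}

int main(void)
{
	/* -3 * 4 = -12, shifted right by 1 -> -6, carried in a uint64_t. */
	assert((int64_t)mul_s64_u64_shr(-3, 4, 1) == -6);
	/* With a non-negative a it is a plain unsigned multiply-shift. */
	assert(mul_s64_u64_shr(6, (uint64_t)1 << 32, 32) == 6);
	return 0;
}

The result travels in a u64 but carries a two's-complement signed value, so a caller that needs the sign casts it back to s64.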
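The DIV_U64_ROUND_UP and DIV_U64_ROUND_CLOSEST macros and roundup_u64() added above mirror the existing 64-by-64 variants but take a 32bit divisor so they can sit on top of div_u64(). A small userspace sketch of the same semantics follows; it uses stdint types and plain '/' where the kernel would use the optimized div_u64(), so it is illustrative only, not the kernel build environment.

#include <assert.h>
#include <stdint.h>

/* Hypothetical userspace stand-ins for the new rounding helpers. */
#define DIV_U64_ROUND_UP(ll, d) \
	({ uint32_t _tmp = (d); ((ll) + _tmp - 1) / _tmp; })
#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
	({ uint32_t _tmp = (divisor); ((uint64_t)(dividend) + _tmp / 2) / _tmp; })

static inline uint64_t roundup_u64(uint64_t x, uint32_t y)
{
	/* Round x up to the next multiple of y, as the new helper does. */
	return DIV_U64_ROUND_UP(x, y) * y;
}

int main(void)
{
	assert(DIV_U64_ROUND_UP(1000, 300) == 4);      /* 1000/300 = 3.33 -> 4 */
	assert(DIV_U64_ROUND_CLOSEST(1000, 300) == 3); /* 3.33 -> 3 */
	assert(roundup_u64(1000, 300) == 1200);        /* next multiple of 300 */
	return 0;
}

As in the existing DIV64_U64_* macros, the statement-expression form evaluates the divisor once into _tmp, so a divisor expression with side effects is not evaluated twice.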