author    Vincent Mailhol <mailhol.vincent@wanadoo.fr>  2022-09-07 18:09:35 +0900
committer Borislav Petkov <bp@suse.de>                  2022-09-20 15:35:37 +0200
commit    fdb6649ab7c142e497539a471e573c2593b9c923 (patch)
tree      b19c86aff25f6ae9801b20042a455972bb3d65c7
parent    x86/asm/bitops: Use __builtin_ffs() to evaluate constant expressions (diff)
x86/asm/bitops: Use __builtin_ctzl() to evaluate constant expressions
If x is not 0, __ffs(x) is equivalent to:
  (unsigned long)__builtin_ctzl(x)

And if x is not ~0UL, ffz(x) is equivalent to:
  (unsigned long)__builtin_ctzl(~x)

Because __builtin_ctzl() returns an int, a cast to (unsigned long) is
necessary to avoid potential warnings on implicit casts.

Concerning the edge cases, __builtin_ctzl(0) is always undefined,
whereas __ffs(0) and ffz(~0UL) may or may not be defined, depending on
the processor. Regardless, for both functions, developers are asked to
check against 0 or ~0UL, so replacing __ffs() or ffz() with
__builtin_ctzl() is safe.

For x86_64, the current __ffs() and ffz() implementations do not
produce optimized code when called with a constant expression. On the
contrary, __builtin_ctzl() folds into a single instruction. However,
for non-constant expressions, the kernel's asm versions of __ffs() and
ffz() remain slightly better than the code produced by GCC (GCC emits
a useless instruction to clear eax).

Use __builtin_constant_p() to select between the kernel's
__ffs()/ffz() and __builtin_ctzl() depending on whether the argument
is constant.

** Statistics **

On an allyesconfig, before...:

  $ objdump -d vmlinux.o | grep tzcnt | wc -l
  3607

...and after:

  $ objdump -d vmlinux.o | grep tzcnt | wc -l
  2600

So, roughly 27.9% of the calls to either __ffs() or ffz() were using
constant expressions and could be optimized out.

(tests done on linux v5.18-rc5 x86_64 using GCC 11.2.1)

Note: on x86_64, the BSF instruction produces TZCNT when used with the
REP prefix (which explains the use of `grep tzcnt' instead of `grep
bsf' in the benchmark above), c.f. [1].

[1] e26a44a2d618 ("x86: Use REP BSF unconditionally")

[ bp: Massage commit message. ]

Signed-off-by: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Link: https://lore.kernel.org/r/20220511160319.1045812-1-mailhol.vincent@wanadoo.fr
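[Editorial illustration, not part of the commit: a minimal standalone
userspace sketch of the equivalence claimed above. naive_ffs() is a
hypothetical reference helper introduced here for checking; it is not
the kernel implementation.]

/*
 * Sketch: verify that, for x != 0, __builtin_ctzl(x) matches a naive
 * find-first-set, and that, for x != ~0UL, __builtin_ctzl(~x) finds
 * the index of the first zero bit of x.
 * Build with: gcc -O2 -o ctzl_check ctzl_check.c
 */
#include <assert.h>

/* Naive reference: index of the lowest set bit (x must be non-zero). */
static unsigned long naive_ffs(unsigned long x)
{
	unsigned long i = 0;

	while (!(x & 1UL)) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long x;

	for (x = 1; x < (1UL << 16); x++) {
		/* __ffs(x) case: x is never 0 in this loop */
		assert((unsigned long)__builtin_ctzl(x) == naive_ffs(x));
		/* ffz(x) case: the first zero bit of x is the first set
		 * bit of ~x, and x is never ~0UL in this loop */
		assert((unsigned long)__builtin_ctzl(~x) == naive_ffs(~x));
	}
	return 0;
}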
-rw-r--r--  arch/x86/include/asm/bitops.h  28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 879238e5a6a0..2edf68475fec 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -247,17 +247,30 @@ arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
 	variable_test_bit(nr, addr);
 }
+static __always_inline unsigned long variable__ffs(unsigned long word)
+{
+	asm("rep; bsf %1,%0"
+		: "=r" (word)
+		: "rm" (word));
+	return word;
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static __always_inline unsigned long __ffs(unsigned long word)
+#define __ffs(word)				\
+	(__builtin_constant_p(word) ?		\
+	 (unsigned long)__builtin_ctzl(word) :	\
+	 variable__ffs(word))
+
+static __always_inline unsigned long variable_ffz(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
-		: "rm" (word));
+		: "r" (~word));
 	return word;
 }
@@ -267,13 +280,10 @@ static __always_inline unsigned long __ffs(unsigned long word)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static __always_inline unsigned long ffz(unsigned long word)
-{
-	asm("rep; bsf %1,%0"
-		: "=r" (word)
-		: "r" (~word));
-	return word;
-}
+#define ffz(word)				\
+	(__builtin_constant_p(word) ?		\
+	 (unsigned long)__builtin_ctzl(~word) :	\
+	 variable_ffz(word))
 /*
  * __fls: find last set bit in word
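[Editorial illustration, not part of the commit: the
__builtin_constant_p() dispatch used above can be reproduced in a
standalone program to observe the constant folding. variable__ffs()
here is a portable stand-in for the kernel's "rep; bsf" inline asm,
and main() exists only for demonstration.]

#include <stdio.h>

/* Portable stand-in for the kernel's inline-asm helper. */
static inline unsigned long variable__ffs(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(word);
}

/* Same selection pattern as the patch: constant expressions are
 * folded at compile time via __builtin_ctzl(), everything else
 * goes through the helper. */
#define __ffs(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(word) :	\
	 variable__ffs(word))

int main(int argc, char **argv)
{
	unsigned long v = (unsigned long)argc << 4;	/* runtime value */

	(void)argv;
	printf("%lu\n", __ffs(0x10UL));	/* folds to the constant 4 */
	printf("%lu\n", __ffs(v));	/* takes the runtime helper path */
	return 0;
}

Compiling this sketch with -O2 and inspecting the assembly (gcc -O2 -S)
should show the first call reduced to a literal 4 while the second
still performs a bit scan, mirroring the tzcnt reduction measured in
the commit message.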