Diffstat (limited to 'arch/arc/include/asm/bitops.h')
-rw-r--r--  arch/arc/include/asm/bitops.h  31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 1a5bf07eefe2..4051e9525939 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -32,6 +32,20 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *m)
m += nr >> 5;
+ /*
+ * ARC ISA micro-optimization:
+ *
+ * Instructions dealing with bitpos only consider lower 5 bits (0-31)
+ * e.g. (x << 33) is handled like (x << 1) by the ASL instruction
+ * (mem pointer still needs adjustment to point to next word)
+ *
+ * Hence the masking to clamp @nr arg can be elided in general.
+ *
+ * However, if @nr is a constant (the above assumed it in a register)
+ * and greater than 31, gcc can fold (x << 33) to 0 as an overflow,
+ * given the 32-bit ISA. Thus masking must be done for a constant @nr,
+ * but it generates no code thanks to constant propagation.
+ */
if (__builtin_constant_p(nr))
nr &= 0x1f;
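
To make the clamping rule in the comment above concrete, here is a minimal user-space sketch (hypothetical names such as sketch_set_bit, not the kernel code): the bit number splits into a word index (nr >> 5) and a position inside that 32-bit word. Portable C keeps the "& 0x1f" mask unconditionally to avoid an out-of-range shift; the kernel routine can skip it for a non-constant @nr because the ARC shifter only looks at bits [4:0] of the shift amount.

#include <stdio.h>

/* Hypothetical user-space illustration, not the kernel implementation */
static void sketch_set_bit(unsigned int nr, unsigned int *m)
{
	m += nr >> 5;              /* step to the 32-bit word holding the bit */
	*m |= 1U << (nr & 0x1f);   /* set the bit within that word */
}

int main(void)
{
	unsigned int map[2] = { 0, 0 };

	sketch_set_bit(3, map);    /* bit 3 of word 0 */
	sketch_set_bit(33, map);   /* bit 1 of word 1 */

	printf("%08x %08x\n", map[0], map[1]);   /* prints: 00000008 00000002 */
	return 0;
}
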
@@ -374,29 +388,20 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *m)
* This routine doesn't need to be atomic.
*/
static inline int
-__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int
-__test_bit(unsigned int nr, const volatile unsigned long *addr)
+test_bit(unsigned int nr, const volatile unsigned long *addr)
{
unsigned long mask;
addr += nr >> 5;
- /* ARC700 only considers 5 bits in bit-fiddling insn */
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
mask = 1 << nr;
return ((mask & *addr) != 0);
}
-#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
- __constant_test_bit((nr), (addr)) : \
- __test_bit((nr), (addr)))
-
/*
* Count the number of zeros, starting from MSB
* Helper for fls( ) friends
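
The hunk above collapses the old __constant_test_bit()/__test_bit() macro dispatch into a single test_bit() that masks @nr only when it is a compile-time constant. A rough user-space equivalent, with hypothetical names and an unconditional mask for portability, might look like this:

#include <stdio.h>

/* Hypothetical user-space illustration, not the kernel implementation */
static int sketch_test_bit(unsigned int nr, const unsigned int *addr)
{
	addr += nr >> 5;                     /* word holding the bit */
	return (*addr >> (nr & 0x1f)) & 1;   /* extract that single bit */
}

int main(void)
{
	const unsigned int map[2] = { 0x8, 0x2 };   /* bits 3 and 33 set */

	printf("%d %d %d\n",
	       sketch_test_bit(3, map),    /* 1 */
	       sketch_test_bit(33, map),   /* 1 */
	       sketch_test_bit(4, map));   /* 0 */
	return 0;
}
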