From b3ac891b67bd4b1fc728d1c784cad1212dea433d Mon Sep 17 00:00:00 2001 From: Luca Barbieri Date: Wed, 24 Feb 2010 10:54:22 +0100 Subject: x86: Add support for lock prefix in alternatives The current lock prefix UP/SMP alternative code doesn't allow LOCK_PREFIX to be used in alternatives code. This patch solves the problem by adding a new LOCK_PREFIX_ALTERNATIVE_PATCH macro that only records the lock prefix location but does not emit the prefix. The user of this macro can then start any alternative sequence with "lock" and have it UP/SMP patched. To make this work, the UP/SMP alternative code is changed to do the lock/DS prefix switching only if the byte actually contains a lock or DS prefix. Thus, if an alternative without the "lock" is selected, it will now do nothing instead of clobbering the code. Changes in v2: - Naming change - Change label to not conflict with alternatives Signed-off-by: Luca Barbieri LKML-Reference: <1267005265-27958-2-git-send-email-luca@luca-barbieri.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/alternative.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 3b5b828767b6..55fee12cea6d 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -28,12 +28,14 @@ */ #ifdef CONFIG_SMP -#define LOCK_PREFIX \ +#define LOCK_PREFIX_HERE \ ".section .smp_locks,\"a\"\n" \ _ASM_ALIGN "\n" \ - _ASM_PTR "661f\n" /* address */ \ + _ASM_PTR "671f\n" /* address */ \ ".previous\n" \ - "661:\n\tlock; " + "671:" + +#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " #else /* ! CONFIG_SMP */ #define LOCK_PREFIX "" -- cgit v1.2.3-59-g8ed1b From 9c76b38476b18c45f97098a10b0176b321eba3ea Mon Sep 17 00:00:00 2001 From: Luca Barbieri Date: Wed, 24 Feb 2010 10:54:23 +0100 Subject: x86-32: Allow UP/SMP lock replacement in cmpxchg64 Use the functionality just introduced in the previous patch: mark the lock prefixes in cmpxchg64 alternatives for UP removal. Changes in v2: - Naming change Signed-off-by: Luca Barbieri LKML-Reference: <1267005265-27958-3-git-send-email-luca@luca-barbieri.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/cmpxchg_32.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index ffb9bb6b6c37..8859e12dd3cf 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -271,7 +271,8 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); __typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __old = (o); \ __typeof__(*(ptr)) __new = (n); \ - alternative_io("call cmpxchg8b_emu", \ + alternative_io(LOCK_PREFIX_HERE \ + "call cmpxchg8b_emu", \ "lock; cmpxchg8b (%%esi)" , \ X86_FEATURE_CX8, \ "=A" (__ret), \ -- cgit v1.2.3-59-g8ed1b From a7e926abc3adfbd2e5e20d2b46177adb4e313915 Mon Sep 17 00:00:00 2001 From: Luca Barbieri Date: Wed, 24 Feb 2010 10:54:25 +0100 Subject: x86-32: Rewrite 32-bit atomic64 functions in assembly This patch replaces atomic64_32.c with two assembly implementations, one for 386/486 machines using pushf/cli/popf and one for 586+ machines using cmpxchg8b. The cmpxchg8b implementation provides the following advantages over the current one: 1. Implements atomic64_add_unless, atomic64_dec_if_positive and atomic64_inc_not_zero 2. Uses the ZF flag changed by cmpxchg8b instead of doing a comparison 3. 
Uses custom register calling conventions that reduce or eliminate register moves to suit cmpxchg8b 4. Reads the initial value instead of using cmpxchg8b to do that. Currently we use lock xaddl and movl, which seems the fastest. 5. Does not use the lock prefix for atomic64_set 64-bit writes are already atomic, so we don't need that. We still need it for atomic64_read to avoid restoring a value changed in the meantime. 6. Allocates registers as well or better than gcc The 386 implementation provides support for 386 and 486 machines. 386/486 SMP is not supported (we dropped it), but such support can be added easily if desired. A pure assembly implementation is required due to the custom calling conventions, and desire to use %ebp in atomic64_add_return (we need 7 registers...), as well as the ability to use pushf/popf in the 386 code without an intermediate pop/push. The parameter names are changed to match the convention in atomic_64.h Changes in v3 (due to rebasing to tip/x86/asm): - Patches atomic64_32.h instead of atomic_32.h - Uses the CALL alternative mechanism from commit 1b1d9258181bae199dc940f4bd0298126b9a73d9 Changes in v2: - Merged 386 and cx8 support in the same patch - 386 support now done in assembly, C code no longer used at all - cmpxchg64 is used for atomic64_cmpxchg - stop using macros, use one-line inline functions instead - miscellanous changes and improvements Signed-off-by: Luca Barbieri LKML-Reference: <1267005265-27958-5-git-send-email-luca@luca-barbieri.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/atomic64_32.h | 278 ++++++++++++++++++++++++++++--------- arch/x86/lib/Makefile | 3 +- arch/x86/lib/atomic64_32.c | 273 +++++++----------------------------- arch/x86/lib/atomic64_386_32.S | 175 +++++++++++++++++++++++ arch/x86/lib/atomic64_cx8_32.S | 225 ++++++++++++++++++++++++++++++ 5 files changed, 664 insertions(+), 290 deletions(-) create mode 100644 arch/x86/lib/atomic64_386_32.S create mode 100644 arch/x86/lib/atomic64_cx8_32.S (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 03027bf28de5..2a934aa19a43 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -14,109 +14,193 @@ typedef struct { #define ATOMIC64_INIT(val) { (val) } -extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); +#ifdef CONFIG_X86_CMPXCHG64 +#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8" +#else +#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8) +#endif + +#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f) + +/** + * atomic64_cmpxchg - cmpxchg atomic64 variable + * @p: pointer to type atomic64_t + * @o: expected value + * @n: new value + * + * Atomically sets @v to @n if it was equal to @o and returns + * the old value. + */ + +static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) +{ + return cmpxchg64(&v->counter, o, n); +} /** * atomic64_xchg - xchg atomic64 variable - * @ptr: pointer to type atomic64_t - * @new_val: value to assign + * @v: pointer to type atomic64_t + * @n: value to assign * - * Atomically xchgs the value of @ptr to @new_val and returns + * Atomically xchgs the value of @v to @n and returns * the old value. 
*/ -extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); +static inline long long atomic64_xchg(atomic64_t *v, long long n) +{ + long long o; + unsigned high = (unsigned)(n >> 32); + unsigned low = (unsigned)n; + asm volatile(ATOMIC64_ALTERNATIVE(xchg) + : "=A" (o), "+b" (low), "+c" (high) + : "S" (v) + : "memory" + ); + return o; +} /** * atomic64_set - set atomic64 variable - * @ptr: pointer to type atomic64_t - * @new_val: value to assign + * @v: pointer to type atomic64_t + * @n: value to assign * - * Atomically sets the value of @ptr to @new_val. + * Atomically sets the value of @v to @n. */ -extern void atomic64_set(atomic64_t *ptr, u64 new_val); +static inline void atomic64_set(atomic64_t *v, long long i) +{ + unsigned high = (unsigned)(i >> 32); + unsigned low = (unsigned)i; + asm volatile(ATOMIC64_ALTERNATIVE(set) + : "+b" (low), "+c" (high) + : "S" (v) + : "eax", "edx", "memory" + ); +} /** * atomic64_read - read atomic64 variable - * @ptr: pointer to type atomic64_t + * @v: pointer to type atomic64_t * - * Atomically reads the value of @ptr and returns it. + * Atomically reads the value of @v and returns it. */ -static inline u64 atomic64_read(atomic64_t *ptr) +static inline long long atomic64_read(atomic64_t *v) { - u64 res; - - /* - * Note, we inline this atomic64_t primitive because - * it only clobbers EAX/EDX and leaves the others - * untouched. We also (somewhat subtly) rely on the - * fact that cmpxchg8b returns the current 64-bit value - * of the memory location we are touching: - */ - asm volatile( - "mov %%ebx, %%eax\n\t" - "mov %%ecx, %%edx\n\t" - LOCK_PREFIX "cmpxchg8b %1\n" - : "=&A" (res) - : "m" (*ptr) - ); - - return res; -} - -extern u64 atomic64_read(atomic64_t *ptr); + long long r; + asm volatile(ATOMIC64_ALTERNATIVE(read) + : "=A" (r), "+c" (v) + : : "memory" + ); + return r; + } /** * atomic64_add_return - add and return - * @delta: integer value to add - * @ptr: pointer to type atomic64_t + * @i: integer value to add + * @v: pointer to type atomic64_t * - * Atomically adds @delta to @ptr and returns @delta + *@ptr + * Atomically adds @i to @v and returns @i + *@v */ -extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); +static inline long long atomic64_add_return(long long i, atomic64_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE(add_return) + : "+A" (i), "+c" (v) + : : "memory" + ); + return i; +} /* * Other variants with different arithmetic operators: */ -extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); -extern u64 atomic64_inc_return(atomic64_t *ptr); -extern u64 atomic64_dec_return(atomic64_t *ptr); +static inline long long atomic64_sub_return(long long i, atomic64_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE(sub_return) + : "+A" (i), "+c" (v) + : : "memory" + ); + return i; +} + +static inline long long atomic64_inc_return(atomic64_t *v) +{ + long long a; + asm volatile(ATOMIC64_ALTERNATIVE(inc_return) + : "=A" (a) + : "S" (v) + : "memory", "ecx" + ); + return a; +} + +static inline long long atomic64_dec_return(atomic64_t *v) +{ + long long a; + asm volatile(ATOMIC64_ALTERNATIVE(dec_return) + : "=A" (a) + : "S" (v) + : "memory", "ecx" + ); + return a; +} /** * atomic64_add - add integer to atomic64 variable - * @delta: integer value to add - * @ptr: pointer to type atomic64_t + * @i: integer value to add + * @v: pointer to type atomic64_t * - * Atomically adds @delta to @ptr. + * Atomically adds @i to @v. 
*/ -extern void atomic64_add(u64 delta, atomic64_t *ptr); +static inline long long atomic64_add(long long i, atomic64_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return) + : "+A" (i), "+c" (v) + : : "memory" + ); + return i; +} /** * atomic64_sub - subtract the atomic64 variable - * @delta: integer value to subtract - * @ptr: pointer to type atomic64_t + * @i: integer value to subtract + * @v: pointer to type atomic64_t * - * Atomically subtracts @delta from @ptr. + * Atomically subtracts @i from @v. */ -extern void atomic64_sub(u64 delta, atomic64_t *ptr); +static inline long long atomic64_sub(long long i, atomic64_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return) + : "+A" (i), "+c" (v) + : : "memory" + ); + return i; +} /** * atomic64_sub_and_test - subtract value from variable and test result - * @delta: integer value to subtract - * @ptr: pointer to type atomic64_t - * - * Atomically subtracts @delta from @ptr and returns + * @i: integer value to subtract + * @v: pointer to type atomic64_t + * + * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ -extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); +static inline int atomic64_sub_and_test(long long i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} /** * atomic64_inc - increment atomic64 variable - * @ptr: pointer to type atomic64_t + * @v: pointer to type atomic64_t * - * Atomically increments @ptr by 1. + * Atomically increments @v by 1. */ -extern void atomic64_inc(atomic64_t *ptr); +static inline void atomic64_inc(atomic64_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return) + : : "S" (v) + : "memory", "eax", "ecx", "edx" + ); +} /** * atomic64_dec - decrement atomic64 variable @@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr); * * Atomically decrements @ptr by 1. */ -extern void atomic64_dec(atomic64_t *ptr); +static inline void atomic64_dec(atomic64_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return) + : : "S" (v) + : "memory", "eax", "ecx", "edx" + ); +} /** * atomic64_dec_and_test - decrement and test - * @ptr: pointer to type atomic64_t + * @v: pointer to type atomic64_t * - * Atomically decrements @ptr by 1 and + * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ -extern int atomic64_dec_and_test(atomic64_t *ptr); +static inline int atomic64_dec_and_test(atomic64_t *v) +{ + return atomic64_dec_return(v) == 0; +} /** * atomic64_inc_and_test - increment and test - * @ptr: pointer to type atomic64_t + * @v: pointer to type atomic64_t * - * Atomically increments @ptr by 1 + * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ -extern int atomic64_inc_and_test(atomic64_t *ptr); +static inline int atomic64_inc_and_test(atomic64_t *v) +{ + return atomic64_inc_return(v) == 0; +} /** * atomic64_add_negative - add and test if negative - * @delta: integer value to add - * @ptr: pointer to type atomic64_t + * @i: integer value to add + * @v: pointer to type atomic64_t * - * Atomically adds @delta to @ptr and returns true + * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. 
*/ -extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); +static inline int atomic64_add_negative(long long i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} + +/** + * atomic64_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +{ + unsigned low = (unsigned)u; + unsigned high = (unsigned)(u >> 32); + asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t" + : "+A" (a), "+c" (v), "+S" (low), "+D" (high) + : : "memory"); + return (int)a; +} + + +static inline int atomic64_inc_not_zero(atomic64_t *v) +{ + int r; + asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero) + : "=a" (r) + : "S" (v) + : "ecx", "edx", "memory" + ); + return r; +} + +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long r; + asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive) + : "=A" (r) + : "S" (v) + : "ecx", "memory" + ); + return r; +} + +#undef ATOMIC64_ALTERNATIVE +#undef ATOMIC64_ALTERNATIVE_ #endif /* _ASM_X86_ATOMIC64_32_H */ diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index cffd754f3039..05d686bbbe9f 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -26,11 +26,12 @@ obj-y += msr.o msr-reg.o msr-reg-export.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o + lib-y += atomic64_cx8_32.o lib-y += checksum_32.o lib-y += strstr_32.o lib-y += semaphore_32.o string_32.o ifneq ($(CONFIG_X86_CMPXCHG64),y) - lib-y += cmpxchg8b_emu.o + lib-y += cmpxchg8b_emu.o atomic64_386_32.o endif lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o else diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c index 824fa0be55a3..540179e8e9fa 100644 --- a/arch/x86/lib/atomic64_32.c +++ b/arch/x86/lib/atomic64_32.c @@ -6,225 +6,54 @@ #include #include -static noinline u64 cmpxchg8b(u64 *ptr, u64 old, u64 new) -{ - u32 low = new; - u32 high = new >> 32; - - asm volatile( - LOCK_PREFIX "cmpxchg8b %1\n" - : "+A" (old), "+m" (*ptr) - : "b" (low), "c" (high) - ); - return old; -} - -u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val) -{ - return cmpxchg8b(&ptr->counter, old_val, new_val); -} -EXPORT_SYMBOL(atomic64_cmpxchg); - -/** - * atomic64_xchg - xchg atomic64 variable - * @ptr: pointer to type atomic64_t - * @new_val: value to assign - * - * Atomically xchgs the value of @ptr to @new_val and returns - * the old value. - */ -u64 atomic64_xchg(atomic64_t *ptr, u64 new_val) -{ - /* - * Try first with a (possibly incorrect) assumption about - * what we have there. We'll do two loops most likely, - * but we'll get an ownership MESI transaction straight away - * instead of a read transaction followed by a - * flush-for-ownership transaction: - */ - u64 old_val, real_val = 0; - - do { - old_val = real_val; - - real_val = atomic64_cmpxchg(ptr, old_val, new_val); - - } while (real_val != old_val); - - return old_val; -} -EXPORT_SYMBOL(atomic64_xchg); - -/** - * atomic64_set - set atomic64 variable - * @ptr: pointer to type atomic64_t - * @new_val: value to assign - * - * Atomically sets the value of @ptr to @new_val. 
- */ -void atomic64_set(atomic64_t *ptr, u64 new_val) -{ - atomic64_xchg(ptr, new_val); -} -EXPORT_SYMBOL(atomic64_set); - -/** -EXPORT_SYMBOL(atomic64_read); - * atomic64_add_return - add and return - * @delta: integer value to add - * @ptr: pointer to type atomic64_t - * - * Atomically adds @delta to @ptr and returns @delta + *@ptr - */ -noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr) -{ - /* - * Try first with a (possibly incorrect) assumption about - * what we have there. We'll do two loops most likely, - * but we'll get an ownership MESI transaction straight away - * instead of a read transaction followed by a - * flush-for-ownership transaction: - */ - u64 old_val, new_val, real_val = 0; - - do { - old_val = real_val; - new_val = old_val + delta; - - real_val = atomic64_cmpxchg(ptr, old_val, new_val); - - } while (real_val != old_val); - - return new_val; -} -EXPORT_SYMBOL(atomic64_add_return); - -u64 atomic64_sub_return(u64 delta, atomic64_t *ptr) -{ - return atomic64_add_return(-delta, ptr); -} -EXPORT_SYMBOL(atomic64_sub_return); - -u64 atomic64_inc_return(atomic64_t *ptr) -{ - return atomic64_add_return(1, ptr); -} -EXPORT_SYMBOL(atomic64_inc_return); - -u64 atomic64_dec_return(atomic64_t *ptr) -{ - return atomic64_sub_return(1, ptr); -} -EXPORT_SYMBOL(atomic64_dec_return); - -/** - * atomic64_add - add integer to atomic64 variable - * @delta: integer value to add - * @ptr: pointer to type atomic64_t - * - * Atomically adds @delta to @ptr. - */ -void atomic64_add(u64 delta, atomic64_t *ptr) -{ - atomic64_add_return(delta, ptr); -} -EXPORT_SYMBOL(atomic64_add); - -/** - * atomic64_sub - subtract the atomic64 variable - * @delta: integer value to subtract - * @ptr: pointer to type atomic64_t - * - * Atomically subtracts @delta from @ptr. - */ -void atomic64_sub(u64 delta, atomic64_t *ptr) -{ - atomic64_add(-delta, ptr); -} -EXPORT_SYMBOL(atomic64_sub); - -/** - * atomic64_sub_and_test - subtract value from variable and test result - * @delta: integer value to subtract - * @ptr: pointer to type atomic64_t - * - * Atomically subtracts @delta from @ptr and returns - * true if the result is zero, or false for all - * other cases. - */ -int atomic64_sub_and_test(u64 delta, atomic64_t *ptr) -{ - u64 new_val = atomic64_sub_return(delta, ptr); - - return new_val == 0; -} -EXPORT_SYMBOL(atomic64_sub_and_test); - -/** - * atomic64_inc - increment atomic64 variable - * @ptr: pointer to type atomic64_t - * - * Atomically increments @ptr by 1. - */ -void atomic64_inc(atomic64_t *ptr) -{ - atomic64_add(1, ptr); -} -EXPORT_SYMBOL(atomic64_inc); - -/** - * atomic64_dec - decrement atomic64 variable - * @ptr: pointer to type atomic64_t - * - * Atomically decrements @ptr by 1. - */ -void atomic64_dec(atomic64_t *ptr) -{ - atomic64_sub(1, ptr); -} -EXPORT_SYMBOL(atomic64_dec); - -/** - * atomic64_dec_and_test - decrement and test - * @ptr: pointer to type atomic64_t - * - * Atomically decrements @ptr by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -int atomic64_dec_and_test(atomic64_t *ptr) -{ - return atomic64_sub_and_test(1, ptr); -} -EXPORT_SYMBOL(atomic64_dec_and_test); - -/** - * atomic64_inc_and_test - increment and test - * @ptr: pointer to type atomic64_t - * - * Atomically increments @ptr by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -int atomic64_inc_and_test(atomic64_t *ptr) -{ - return atomic64_sub_and_test(-1, ptr); -} -EXPORT_SYMBOL(atomic64_inc_and_test); - -/** - * atomic64_add_negative - add and test if negative - * @delta: integer value to add - * @ptr: pointer to type atomic64_t - * - * Atomically adds @delta to @ptr and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -int atomic64_add_negative(u64 delta, atomic64_t *ptr) -{ - s64 new_val = atomic64_add_return(delta, ptr); - - return new_val < 0; -} -EXPORT_SYMBOL(atomic64_add_negative); +long long atomic64_read_cx8(long long, const atomic64_t *v); +EXPORT_SYMBOL(atomic64_read_cx8); +long long atomic64_set_cx8(long long, const atomic64_t *v); +EXPORT_SYMBOL(atomic64_set_cx8); +long long atomic64_xchg_cx8(long long, unsigned high); +EXPORT_SYMBOL(atomic64_xchg_cx8); +long long atomic64_add_return_cx8(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_add_return_cx8); +long long atomic64_sub_return_cx8(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_sub_return_cx8); +long long atomic64_inc_return_cx8(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_inc_return_cx8); +long long atomic64_dec_return_cx8(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_dec_return_cx8); +long long atomic64_dec_if_positive_cx8(atomic64_t *v); +EXPORT_SYMBOL(atomic64_dec_if_positive_cx8); +int atomic64_inc_not_zero_cx8(atomic64_t *v); +EXPORT_SYMBOL(atomic64_inc_not_zero_cx8); +int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u); +EXPORT_SYMBOL(atomic64_add_unless_cx8); + +#ifndef CONFIG_X86_CMPXCHG64 +long long atomic64_read_386(long long, const atomic64_t *v); +EXPORT_SYMBOL(atomic64_read_386); +long long atomic64_set_386(long long, const atomic64_t *v); +EXPORT_SYMBOL(atomic64_set_386); +long long atomic64_xchg_386(long long, unsigned high); +EXPORT_SYMBOL(atomic64_xchg_386); +long long atomic64_add_return_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_add_return_386); +long long atomic64_sub_return_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_sub_return_386); +long long atomic64_inc_return_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_inc_return_386); +long long atomic64_dec_return_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_dec_return_386); +long long atomic64_add_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_add_386); +long long atomic64_sub_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_sub_386); +long long atomic64_inc_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_inc_386); +long long atomic64_dec_386(long long a, atomic64_t *v); +EXPORT_SYMBOL(atomic64_dec_386); +long long atomic64_dec_if_positive_386(atomic64_t *v); +EXPORT_SYMBOL(atomic64_dec_if_positive_386); +int atomic64_inc_not_zero_386(atomic64_t *v); +EXPORT_SYMBOL(atomic64_inc_not_zero_386); +int atomic64_add_unless_386(atomic64_t *v, long long a, long long u); +EXPORT_SYMBOL(atomic64_add_unless_386); +#endif diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S new file mode 100644 index 000000000000..5db07fe4a0ca --- /dev/null +++ b/arch/x86/lib/atomic64_386_32.S @@ -0,0 +1,175 @@ +/* + * atomic64_t for 386/486 + * + * Copyright © 2010 Luca Barbieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +/* if you want SMP support, implement these with real spinlocks */ +.macro LOCK reg + pushfl + CFI_ADJUST_CFA_OFFSET 4 + cli +.endm + +.macro UNLOCK reg + popfl + CFI_ADJUST_CFA_OFFSET -4 +.endm + +.macro BEGIN func reg +$v = \reg + +ENTRY(atomic64_\func\()_386) + CFI_STARTPROC + LOCK $v + +.macro RETURN + UNLOCK $v + ret +.endm + +.macro END_ + CFI_ENDPROC +ENDPROC(atomic64_\func\()_386) +.purgem RETURN +.purgem END_ +.purgem END +.endm + +.macro END +RETURN +END_ +.endm +.endm + +BEGIN read %ecx + movl ($v), %eax + movl 4($v), %edx +END + +BEGIN set %esi + movl %ebx, ($v) + movl %ecx, 4($v) +END + +BEGIN xchg %esi + movl ($v), %eax + movl 4($v), %edx + movl %ebx, ($v) + movl %ecx, 4($v) +END + +BEGIN add %ecx + addl %eax, ($v) + adcl %edx, 4($v) +END + +BEGIN add_return %ecx + addl ($v), %eax + adcl 4($v), %edx + movl %eax, ($v) + movl %edx, 4($v) +END + +BEGIN sub %ecx + subl %eax, ($v) + sbbl %edx, 4($v) +END + +BEGIN sub_return %ecx + negl %edx + negl %eax + sbbl $0, %edx + addl ($v), %eax + adcl 4($v), %edx + movl %eax, ($v) + movl %edx, 4($v) +END + +BEGIN inc %esi + addl $1, ($v) + adcl $0, 4($v) +END + +BEGIN inc_return %esi + movl ($v), %eax + movl 4($v), %edx + addl $1, %eax + adcl $0, %edx + movl %eax, ($v) + movl %edx, 4($v) +END + +BEGIN dec %esi + subl $1, ($v) + sbbl $0, 4($v) +END + +BEGIN dec_return %esi + movl ($v), %eax + movl 4($v), %edx + subl $1, %eax + sbbl $0, %edx + movl %eax, ($v) + movl %edx, 4($v) +END + +BEGIN add_unless %ecx + addl %eax, %esi + adcl %edx, %edi + addl ($v), %eax + adcl 4($v), %edx + cmpl %eax, %esi + je 3f +1: + movl %eax, ($v) + movl %edx, 4($v) + xorl %eax, %eax +2: +RETURN +3: + cmpl %edx, %edi + jne 1b + movl $1, %eax + jmp 2b +END_ + +BEGIN inc_not_zero %esi + movl ($v), %eax + movl 4($v), %edx + testl %eax, %eax + je 3f +1: + addl $1, %eax + adcl $0, %edx + movl %eax, ($v) + movl %edx, 4($v) + xorl %eax, %eax +2: +RETURN +3: + testl %edx, %edx + jne 1b + movl $1, %eax + jmp 2b +END_ + +BEGIN dec_if_positive %esi + movl ($v), %eax + movl 4($v), %edx + subl $1, %eax + sbbl $0, %edx + js 1f + movl %eax, ($v) + movl %edx, 4($v) +1: +END diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S new file mode 100644 index 000000000000..e49c4ebca9f4 --- /dev/null +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -0,0 +1,225 @@ +/* + * atomic64_t for 586+ + * + * Copyright © 2010 Luca Barbieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +.macro SAVE reg + pushl %\reg + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET \reg, 0 +.endm + +.macro RESTORE reg + popl %\reg + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE \reg +.endm + +.macro read64 reg + movl %ebx, %eax + movl %ecx, %edx +/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ + LOCK_PREFIX + cmpxchg8b (\reg) +.endm + +ENTRY(atomic64_read_cx8) + CFI_STARTPROC + + read64 %ecx + ret + CFI_ENDPROC +ENDPROC(atomic64_read_cx8) + +ENTRY(atomic64_set_cx8) + CFI_STARTPROC + +1: +/* we don't need LOCK_PREFIX since aligned 64-bit writes + * are atomic on 586 and newer */ + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_set_cx8) + +ENTRY(atomic64_xchg_cx8) + CFI_STARTPROC + + movl %ebx, %eax + movl %ecx, %edx +1: + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_xchg_cx8) + +.macro addsub_return func ins insc +ENTRY(atomic64_\func\()_return_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx + SAVE esi + SAVE edi + + movl %eax, %esi + movl %edx, %edi + movl %ecx, %ebp + + read64 %ebp +1: + movl %eax, %ebx + movl %edx, %ecx + \ins\()l %esi, %ebx + \insc\()l %edi, %ecx + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b + +10: + movl %ebx, %eax + movl %ecx, %edx + RESTORE edi + RESTORE esi + RESTORE ebx + RESTORE ebp + ret + CFI_ENDPROC +ENDPROC(atomic64_\func\()_return_cx8) +.endm + +addsub_return add add adc +addsub_return sub sub sbb + +.macro incdec_return func ins insc +ENTRY(atomic64_\func\()_return_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + movl %eax, %ebx + movl %edx, %ecx + \ins\()l $1, %ebx + \insc\()l $0, %ecx + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +10: + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx + ret + CFI_ENDPROC +ENDPROC(atomic64_\func\()_return_cx8) +.endm + +incdec_return inc add adc +incdec_return dec sub sbb + +ENTRY(atomic64_dec_if_positive_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + movl %eax, %ebx + movl %edx, %ecx + subl $1, %ebx + sbb $0, %ecx + js 2f + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +2: + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx + ret + CFI_ENDPROC +ENDPROC(atomic64_dec_if_positive_cx8) + +ENTRY(atomic64_add_unless_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx +/* these just push these two parameters on the stack */ + SAVE edi + SAVE esi + + movl %ecx, %ebp + movl %eax, %esi + movl %edx, %edi + + read64 %ebp +1: + cmpl %eax, 0(%esp) + je 4f +2: + movl %eax, %ebx + movl %edx, %ecx + addl %esi, %ebx + adcl %edi, %ecx + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b + + xorl %eax, %eax +3: + addl $8, %esp + CFI_ADJUST_CFA_OFFSET -8 + RESTORE ebx + RESTORE ebp + ret +4: + cmpl %edx, 4(%esp) + jne 2b + movl $1, %eax + jmp 3b + CFI_ENDPROC +ENDPROC(atomic64_add_unless_cx8) + +ENTRY(atomic64_inc_not_zero_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + testl %eax, %eax + je 4f +2: + movl %eax, %ebx + movl %edx, %ecx + addl $1, %ebx + adcl $0, %ecx + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + + xorl %eax, %eax +3: + RESTORE ebx + ret +4: + testl %edx, %edx + jne 2b + movl $1, %eax + jmp 3b + CFI_ENDPROC +ENDPROC(atomic64_inc_not_zero_cx8) -- cgit v1.2.3-59-g8ed1b From d7f6de1e9c4a12e11ba7186c70f0f40caa76f590 Mon Sep 17 00:00:00 2001 From: Luca Barbieri Date: Fri, 26 Feb 2010 12:22:41 +0100 Subject: x86: Implement atomic[64]_dec_if_positive() Add support for atomic_dec_if_positive(), and atomic64_dec_if_positive() for x86-64. atomic64_dec_if_positive() for x86-32 was already implemented in a previous patch. 
Signed-off-by: Luca Barbieri LKML-Reference: <1267183361-20775-2-git-send-email-luca@luca-barbieri.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/atomic.h | 23 +++++++++++++++++++++++ arch/x86/include/asm/atomic64_64.h | 23 +++++++++++++++++++++++ lib/atomic64_test.c | 2 +- 3 files changed, 47 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 8f8217b9bdac..706c69492c14 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -246,6 +246,29 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +/* + * atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline int atomic_dec_if_positive(atomic_t *v) +{ + int c, old, dec; + c = atomic_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + /** * atomic_inc_short - increment of a short integer * @v: pointer to type int diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 51c5b4056929..4d6e2cd6c88c 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -221,4 +221,27 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +/* + * atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long atomic64_dec_if_positive(atomic64_t *v) +{ + long c, old, dec; + c = atomic64_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic64_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + #endif /* _ASM_X86_ATOMIC64_64_H */ diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index 0effcacbebda..58efdabb3845 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c @@ -112,7 +112,7 @@ static __init int test_atomic64(void) r += one; BUG_ON(v.counter != r); -#if defined(CONFIG_X86_32) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(_ASM_GENERIC_ATOMIC64_H) +#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(_ASM_GENERIC_ATOMIC64_H) INIT(onestwos); BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); r -= one; -- cgit v1.2.3-59-g8ed1b From ced918eb748ce30b3aace549fd17540e40ffdca0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 17 Feb 2010 16:47:10 +0000 Subject: i8253: Convert i8253_lock to raw_spinlock i8253_lock needs to be a real spinlock in preempt-rt, i.e. it can not be converted to a sleeping lock. Convert it to raw_spinlock and fix up all users. 
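Every caller follows the same mechanical substitution, sketched here for an arbitrary PIT-style register write; the lock name, port number and wrapper function are placeholders, the real conversions are in the hunks that follow:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/io.h>

/* was: static DEFINE_SPINLOCK(example_hw_lock); */
static DEFINE_RAW_SPINLOCK(example_hw_lock);

static void example_program_hw(u8 val)
{
        unsigned long flags;

        /*
         * The raw_ variants keep spinning even when CONFIG_PREEMPT_RT turns
         * spinlock_t into a sleeping lock, which this IRQs-off hardware
         * programming path cannot tolerate.
         */
        raw_spin_lock_irqsave(&example_hw_lock, flags);
        outb_p(val, 0x43);              /* hypothetical mode/command port */
        raw_spin_unlock_irqrestore(&example_hw_lock, flags);
}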
Signed-off-by: Thomas Gleixner Acked-by: Ralf Baechle Acked-by: Dmitry Torokhov Acked-by: Takashi Iwai Cc: Jens Axboe LKML-Reference: <20100217163751.030764372@linutronix.de> --- arch/mips/include/asm/i8253.h | 2 +- arch/mips/kernel/i8253.c | 14 +++++++------- arch/x86/include/asm/i8253.h | 2 +- arch/x86/kernel/apm_32.c | 4 ++-- arch/x86/kernel/i8253.c | 14 +++++++------- drivers/block/hd.c | 4 ++-- drivers/input/gameport/gameport.c | 4 ++-- drivers/input/joystick/analog.c | 4 ++-- drivers/input/misc/pcspkr.c | 6 +++--- sound/drivers/pcsp/pcsp.h | 2 +- sound/drivers/pcsp/pcsp_input.c | 4 ++-- sound/drivers/pcsp/pcsp_lib.c | 12 ++++++------ 12 files changed, 36 insertions(+), 36 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h index 032ca73f181b..48bb82372994 100644 --- a/arch/mips/include/asm/i8253.h +++ b/arch/mips/include/asm/i8253.h @@ -12,7 +12,7 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -extern spinlock_t i8253_lock; +extern raw_spinlock_t i8253_lock; extern void setup_pit_timer(void); diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index ed5c441615e4..94794062a177 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -15,7 +15,7 @@ #include #include -DEFINE_SPINLOCK(i8253_lock); +DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); /* @@ -26,7 +26,7 @@ EXPORT_SYMBOL(i8253_lock); static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + raw_spin_lock(&i8253_lock); switch(mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -55,7 +55,7 @@ static void init_pit_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock(&i8253_lock); + raw_spin_unlock(&i8253_lock); } /* @@ -65,10 +65,10 @@ static void init_pit_timer(enum clock_event_mode mode, */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + raw_spin_lock(&i8253_lock); outb_p(delta & 0xff , PIT_CH0); /* LSB */ outb(delta >> 8 , PIT_CH0); /* MSB */ - spin_unlock(&i8253_lock); + raw_spin_unlock(&i8253_lock); return 0; } @@ -137,7 +137,7 @@ static cycle_t pit_read(struct clocksource *cs) static int old_count; static u32 old_jifs; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating in this routine @@ -183,7 +183,7 @@ static cycle_t pit_read(struct clocksource *cs) old_count = count; old_jifs = jifs; - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); count = (LATCH - 1) - count; diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h index 1edbf89680fd..fc1f579fb965 100644 --- a/arch/x86/include/asm/i8253.h +++ b/arch/x86/include/asm/i8253.h @@ -6,7 +6,7 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -extern spinlock_t i8253_lock; +extern raw_spinlock_t i8253_lock; extern struct clock_event_device *global_clock_event; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 031aa887b0eb..c4f9182ca3ac 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1224,7 +1224,7 @@ static void reinit_timer(void) #ifdef INIT_TIMER_AFTER_SUSPEND unsigned long flags; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); /* set the clock to HZ */ outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ udelay(10); @@ -1232,7 +1232,7 @@ static void 
reinit_timer(void) udelay(10); outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ udelay(10); - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); #endif } diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 23c167925a5c..2dfd31597443 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -16,7 +16,7 @@ #include #include -DEFINE_SPINLOCK(i8253_lock); +DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); /* @@ -33,7 +33,7 @@ struct clock_event_device *global_clock_event; static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + raw_spin_lock(&i8253_lock); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -62,7 +62,7 @@ static void init_pit_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock(&i8253_lock); + raw_spin_unlock(&i8253_lock); } /* @@ -72,10 +72,10 @@ static void init_pit_timer(enum clock_event_mode mode, */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + raw_spin_lock(&i8253_lock); outb_pit(delta & 0xff , PIT_CH0); /* LSB */ outb_pit(delta >> 8 , PIT_CH0); /* MSB */ - spin_unlock(&i8253_lock); + raw_spin_unlock(&i8253_lock); return 0; } @@ -130,7 +130,7 @@ static cycle_t pit_read(struct clocksource *cs) int count; u32 jifs; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating in this routine @@ -176,7 +176,7 @@ static cycle_t pit_read(struct clocksource *cs) old_count = count; old_jifs = jifs; - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); count = (LATCH - 1) - count; diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 5116c65c07cb..b9868ad0278d 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -165,12 +165,12 @@ unsigned long read_timer(void) unsigned long t, flags; int i; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); t = jiffies * 11932; outb_p(0, 0x43); i = inb_p(0x40); i |= inb(0x40) << 8; - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); return(t - i); } #endif diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 7e18bcf05a66..46239e47a260 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -59,11 +59,11 @@ static unsigned int get_time_pit(void) unsigned long flags; unsigned int count; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); outb_p(0x00, 0x43); count = inb_p(0x40); count |= inb_p(0x40) << 8; - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); return count; } diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 1c0b529c06aa..4afe0a3b4884 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -146,11 +146,11 @@ static unsigned int get_time_pit(void) unsigned long flags; unsigned int count; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); outb_p(0x00, 0x43); count = inb_p(0x40); count |= inb_p(0x40) << 8; - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); return count; } diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 
ea4e1fd12651..f080dd31499b 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr"); #include #else #include -static DEFINE_SPINLOCK(i8253_lock); +static DEFINE_RAW_SPINLOCK(i8253_lock); #endif static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) @@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c if (value > 20 && value < 32767) count = PIT_TICK_RATE / value; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); if (count) { /* set command for counter 2, 2 byte write */ @@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c outb(inb_p(0x61) & 0xFC, 0x61); } - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); return 0; } diff --git a/sound/drivers/pcsp/pcsp.h b/sound/drivers/pcsp/pcsp.h index 1e123077923d..4ff6c8cc5077 100644 --- a/sound/drivers/pcsp/pcsp.h +++ b/sound/drivers/pcsp/pcsp.h @@ -16,7 +16,7 @@ #include #else #include -static DEFINE_SPINLOCK(i8253_lock); +static DEFINE_RAW_SPINLOCK(i8253_lock); #endif #define PCSP_SOUND_VERSION 0x400 /* read 4.00 */ diff --git a/sound/drivers/pcsp/pcsp_input.c b/sound/drivers/pcsp/pcsp_input.c index 0444cdeb4bec..b5e2b54c2604 100644 --- a/sound/drivers/pcsp/pcsp_input.c +++ b/sound/drivers/pcsp/pcsp_input.c @@ -21,7 +21,7 @@ static void pcspkr_do_sound(unsigned int count) { unsigned long flags; - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); if (count) { /* set command for counter 2, 2 byte write */ @@ -36,7 +36,7 @@ static void pcspkr_do_sound(unsigned int count) outb(inb_p(0x61) & 0xFC, 0x61); } - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); } void pcspkr_stop_sound(void) diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c index e1145ac6e908..f6a2e72b8cde 100644 --- a/sound/drivers/pcsp/pcsp_lib.c +++ b/sound/drivers/pcsp/pcsp_lib.c @@ -65,7 +65,7 @@ static u64 pcsp_timer_update(struct snd_pcsp *chip) timer_cnt = val * CUR_DIV() / 256; if (timer_cnt && chip->enable) { - spin_lock_irqsave(&i8253_lock, flags); + raw_spin_lock_irqsave(&i8253_lock, flags); if (!nforce_wa) { outb_p(chip->val61, 0x61); outb_p(timer_cnt, 0x42); @@ -74,7 +74,7 @@ static u64 pcsp_timer_update(struct snd_pcsp *chip) outb(chip->val61 ^ 2, 0x61); chip->thalf = 1; } - spin_unlock_irqrestore(&i8253_lock, flags); + raw_spin_unlock_irqrestore(&i8253_lock, flags); } chip->ns_rem = PCSP_PERIOD_NS(); @@ -158,10 +158,10 @@ static int pcsp_start_playing(struct snd_pcsp *chip) return -EIO; } - spin_lock(&i8253_lock); + raw_spin_lock(&i8253_lock); chip->val61 = inb(0x61) | 0x03; outb_p(0x92, 0x43); /* binary, mode 1, LSB only, ch 2 */ - spin_unlock(&i8253_lock); + raw_spin_unlock(&i8253_lock); atomic_set(&chip->timer_active, 1); chip->thalf = 0; @@ -178,11 +178,11 @@ static void pcsp_stop_playing(struct snd_pcsp *chip) return; atomic_set(&chip->timer_active, 0); - spin_lock(&i8253_lock); + raw_spin_lock(&i8253_lock); /* restore the timer */ outb_p(0xb6, 0x43); /* binary, mode 3, LSB/MSB, ch 2 */ outb(chip->val61 & 0xFC, 0x61); - spin_unlock(&i8253_lock); + raw_spin_unlock(&i8253_lock); } /* -- cgit v1.2.3-59-g8ed1b From cbb9d729f3433c9c2660b01dc52e6deb89488886 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 15 Jan 2010 14:41:15 +0100 Subject: x86/amd-iommu: Make iommu_map_page and alloc_pte 
aware of page sizes This patch changes the old map_size parameter of alloc_pte to a page_size parameter which can be used more easily to alloc a pte for intermediate page sizes. Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 28 ++++++++++++++++++ arch/x86/kernel/amd_iommu.c | 53 +++++++++++++++++++++------------- 2 files changed, 61 insertions(+), 20 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index ba19ad4c47d0..5e8da56755dd 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -172,6 +172,34 @@ (~((1ULL << (12 + ((lvl) * 9))) - 1))) #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) +/* + * Returns the page table level to use for a given page size + * Pagesize is expected to be a power-of-two + */ +#define PAGE_SIZE_LEVEL(pagesize) \ + ((__ffs(pagesize) - 12) / 9) +/* + * Returns the number of ptes to use for a given page size + * Pagesize is expected to be a power-of-two + */ +#define PAGE_SIZE_PTE_COUNT(pagesize) \ + (1ULL << ((__ffs(pagesize) - 12) % 9)) + +/* + * Aligns a given io-virtual address to a given page size + * Pagesize is expected to be a power-of-two + */ +#define PAGE_SIZE_ALIGN(address, pagesize) \ + ((address) & ~((pagesize) - 1)) +/* + * Creates an IOMMU PTE for an address an a given pagesize + * The PTE has no permission bits set + * Pagesize is expected to be a power-of-two larger than 4096 + */ +#define PAGE_SIZE_PTE(address, pagesize) \ + (((address) | ((pagesize) - 1)) & \ + (~(pagesize >> 1)) & PM_ADDR_MASK) + #define IOMMU_PTE_P (1ULL << 0) #define IOMMU_PTE_TV (1ULL << 1) #define IOMMU_PTE_U (1ULL << 59) diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 59cae7c4df54..41700314f3e0 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -730,18 +730,22 @@ static bool increase_address_space(struct protection_domain *domain, static u64 *alloc_pte(struct protection_domain *domain, unsigned long address, - int end_lvl, + unsigned long page_size, u64 **pte_page, gfp_t gfp) { + int level, end_lvl; u64 *pte, *page; - int level; + + BUG_ON(!is_power_of_2(page_size)); while (address > PM_LEVEL_SIZE(domain->mode)) increase_address_space(domain, gfp); - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + level = domain->mode - 1; + pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + address = PAGE_SIZE_ALIGN(address, page_size); + end_lvl = PAGE_SIZE_LEVEL(page_size); while (level > end_lvl) { if (!IOMMU_PTE_PRESENT(*pte)) { @@ -751,6 +755,10 @@ static u64 *alloc_pte(struct protection_domain *domain, *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); } + /* No level skipping support yet */ + if (PM_PTE_LEVEL(*pte) != level) + return NULL; + level -= 1; pte = IOMMU_PTE_PAGE(*pte); @@ -806,31 +814,36 @@ static int iommu_map_page(struct protection_domain *dom, unsigned long bus_addr, unsigned long phys_addr, int prot, - int map_size) + unsigned long page_size) { u64 __pte, *pte; - - bus_addr = PAGE_ALIGN(bus_addr); - phys_addr = PAGE_ALIGN(phys_addr); - - BUG_ON(!PM_ALIGNED(map_size, bus_addr)); - BUG_ON(!PM_ALIGNED(map_size, phys_addr)); + int i, count; if (!(prot & IOMMU_PROT_MASK)) return -EINVAL; - pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); + bus_addr = PAGE_ALIGN(bus_addr); + phys_addr = PAGE_ALIGN(phys_addr); + count = PAGE_SIZE_PTE_COUNT(page_size); + pte = alloc_pte(dom, bus_addr, 
page_size, NULL, GFP_KERNEL); + + for (i = 0; i < count; ++i) + if (IOMMU_PTE_PRESENT(pte[i])) + return -EBUSY; - if (IOMMU_PTE_PRESENT(*pte)) - return -EBUSY; + if (page_size > PAGE_SIZE) { + __pte = PAGE_SIZE_PTE(phys_addr, page_size); + __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; + } else + __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC; - __pte = phys_addr | IOMMU_PTE_P; if (prot & IOMMU_PROT_IR) __pte |= IOMMU_PTE_IR; if (prot & IOMMU_PROT_IW) __pte |= IOMMU_PTE_IW; - *pte = __pte; + for (i = 0; i < count; ++i) + pte[i] = __pte; update_domain(dom); @@ -877,7 +890,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, for (addr = e->address_start; addr < e->address_end; addr += PAGE_SIZE) { ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, - PM_MAP_4k); + PAGE_SIZE); if (ret) return ret; /* @@ -1005,7 +1018,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, u64 *pte, *pte_page; for (i = 0; i < num_ptes; ++i) { - pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, + pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, &pte_page, gfp); if (!pte) goto out_free; @@ -1711,7 +1724,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; if (!pte) { - pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, + pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, GFP_ATOMIC); aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; } else @@ -2457,7 +2470,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom, paddr &= PAGE_MASK; for (i = 0; i < npages; ++i) { - ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); + ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE); if (ret) return ret; -- cgit v1.2.3-59-g8ed1b From 24cd772315c19e4d9409d0d21367ec1ebab3149f Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 19 Jan 2010 17:27:39 +0100 Subject: x86/amd-iommu: Make iommu_unmap_page and fetch_pte aware of page sizes This patch extends the functionality of iommu_unmap_page and fetch_pte to support arbitrary page sizes. Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 6 +++ arch/x86/kernel/amd_iommu.c | 90 +++++++++++++++++++++++++++------- 2 files changed, 78 insertions(+), 18 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 5e8da56755dd..b150c74e0d48 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -200,6 +200,12 @@ (((address) | ((pagesize) - 1)) & \ (~(pagesize >> 1)) & PM_ADDR_MASK) +/* + * Takes a PTE value with mode=0x07 and returns the page size it maps + */ +#define PTE_PAGE_SIZE(pte) \ + (1ULL << (1 + ffz(((pte) | 0xfffULL)))) + #define IOMMU_PTE_P (1ULL << 0) #define IOMMU_PTE_TV (1ULL << 1) #define IOMMU_PTE_U (1ULL << 59) diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 41700314f3e0..503d312f9d6f 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -776,28 +776,47 @@ static u64 *alloc_pte(struct protection_domain *domain, * This function checks if there is a PTE for a given dma address. If * there is one, it returns the pointer to it. 
*/ -static u64 *fetch_pte(struct protection_domain *domain, - unsigned long address, int map_size) +static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) { int level; u64 *pte; - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + if (address > PM_LEVEL_SIZE(domain->mode)) + return NULL; + + level = domain->mode - 1; + pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; - while (level > map_size) { + while (level > 0) { + + /* Not Present */ if (!IOMMU_PTE_PRESENT(*pte)) return NULL; + /* Large PTE */ + if (PM_PTE_LEVEL(*pte) == 0x07) { + unsigned long pte_mask, __pte; + + /* + * If we have a series of large PTEs, make + * sure to return a pointer to the first one. + */ + pte_mask = PTE_PAGE_SIZE(*pte); + pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); + __pte = ((unsigned long)pte) & pte_mask; + + return (u64 *)__pte; + } + + /* No level skipping support yet */ + if (PM_PTE_LEVEL(*pte) != level) + return NULL; + level -= 1; + /* Walk to the next level */ pte = IOMMU_PTE_PAGE(*pte); pte = &pte[PM_LEVEL_INDEX(level, address)]; - - if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { - pte = NULL; - break; - } } return pte; @@ -850,13 +869,48 @@ static int iommu_map_page(struct protection_domain *dom, return 0; } -static void iommu_unmap_page(struct protection_domain *dom, - unsigned long bus_addr, int map_size) +static unsigned long iommu_unmap_page(struct protection_domain *dom, + unsigned long bus_addr, + unsigned long page_size) { - u64 *pte = fetch_pte(dom, bus_addr, map_size); + unsigned long long unmap_size, unmapped; + u64 *pte; + + BUG_ON(!is_power_of_2(page_size)); + + unmapped = 0; + + while (unmapped < page_size) { + + pte = fetch_pte(dom, bus_addr); + + if (!pte) { + /* + * No PTE for this address + * move forward in 4kb steps + */ + unmap_size = PAGE_SIZE; + } else if (PM_PTE_LEVEL(*pte) == 0) { + /* 4kb PTE found for this address */ + unmap_size = PAGE_SIZE; + *pte = 0ULL; + } else { + int count, i; + + /* Large PTE found which maps this address */ + unmap_size = PTE_PAGE_SIZE(*pte); + count = PAGE_SIZE_PTE_COUNT(unmap_size); + for (i = 0; i < count; i++) + pte[i] = 0ULL; + } + + bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; + unmapped += unmap_size; + } + + BUG_ON(!is_power_of_2(unmapped)); - if (pte) - *pte = 0; + return unmapped; } /* @@ -1054,7 +1108,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, for (i = dma_dom->aperture[index]->offset; i < dma_dom->aperture_size; i += PAGE_SIZE) { - u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); + u64 *pte = fetch_pte(&dma_dom->domain, i); if (!pte || !IOMMU_PTE_PRESENT(*pte)) continue; @@ -2491,7 +2545,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, iova &= PAGE_MASK; for (i = 0; i < npages; ++i) { - iommu_unmap_page(domain, iova, PM_MAP_4k); + iommu_unmap_page(domain, iova, PAGE_SIZE); iova += PAGE_SIZE; } @@ -2506,7 +2560,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, phys_addr_t paddr; u64 *pte; - pte = fetch_pte(domain, iova, PM_MAP_4k); + pte = fetch_pte(domain, iova); if (!pte || !IOMMU_PTE_PRESENT(*pte)) return 0; -- cgit v1.2.3-59-g8ed1b From 6f4edd69e40aba4f45bf9558c1e9a950d79ab4e4 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Wed, 10 Mar 2010 14:44:58 -0600 Subject: x86, UV: Clean up UV headers for MMR definitions Update UV mmr definitions header file. Eliminate definitions no longer needed. Move 2 definitions from tlb_uv.c into the header file where they belong. 
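The regenerated header exposes each MMR both as raw shift/mask #defines and as a union with a bitfield view (see UVH_LB_BAU_MISC_CONTROL below), so callers no longer need hand-rolled constants such as the removed UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT. A sketch of the intended read-modify-write pattern; it assumes the uv_read_local_mmr()/uv_write_local_mmr() accessors from <asm/uv/uv_hub.h>, and the wrapper name is invented for illustration:

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

static void example_enable_intd_soft_ack(void)
{
        union uvh_lb_bau_misc_control_u mc;

        /* whole-register read, single-field update, whole-register write */
        mc.v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
        mc.s.enable_intd_soft_ack_mode = 1;     /* bit 15 per the new masks */
        uv_write_local_mmr(UVH_LB_BAU_MISC_CONTROL, mc.v);
}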
Signed-off-by: Jack Steiner LKML-Reference: <20100310204458.GA28835@sgi.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uv/uv_mmrs.h | 528 ++++++-------------------------------- arch/x86/kernel/tlb_uv.c | 10 +- 2 files changed, 85 insertions(+), 453 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 2cae46c7c8a2..b2f2d2e05cec 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -1,4 +1,3 @@ - /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -14,14 +13,26 @@ #define UV_MMR_ENABLE (1UL << 63) +/* ========================================================================= */ +/* UVH_BAU_DATA_BROADCAST */ +/* ========================================================================= */ +#define UVH_BAU_DATA_BROADCAST 0x61688UL +#define UVH_BAU_DATA_BROADCAST_32 0x0440 + +#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 +#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL + +union uvh_bau_data_broadcast_u { + unsigned long v; + struct uvh_bau_data_broadcast_s { + unsigned long enable : 1; /* RW */ + unsigned long rsvd_1_63: 63; /* */ + } s; +}; + /* ========================================================================= */ /* UVH_BAU_DATA_CONFIG */ /* ========================================================================= */ -#define UVH_LB_BAU_MISC_CONTROL 0x320170UL -#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15 -#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16 -#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL -/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */ #define UVH_BAU_DATA_CONFIG 0x61680UL #define UVH_BAU_DATA_CONFIG_32 0x0438 @@ -603,6 +614,68 @@ union uvh_lb_bau_intd_software_acknowledge_u { #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70 +/* ========================================================================= */ +/* UVH_LB_BAU_MISC_CONTROL */ +/* ========================================================================= */ +#define UVH_LB_BAU_MISC_CONTROL 0x320170UL +#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10 + +#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 +#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL +#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 +#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL +#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 +#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL +#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 +#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL +#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11 +#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL +#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 +#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL +#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 +#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL +#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 +#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL +#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 +#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 
0x0000000000100000UL +#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 +#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL +#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 +#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL +#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 +#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL +#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 +#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL +#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 +#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL +#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 +#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL +#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48 +#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL + +union uvh_lb_bau_misc_control_u { + unsigned long v; + struct uvh_lb_bau_misc_control_s { + unsigned long rejection_delay : 8; /* RW */ + unsigned long apic_mode : 1; /* RW */ + unsigned long force_broadcast : 1; /* RW */ + unsigned long force_lock_nop : 1; /* RW */ + unsigned long csi_agent_presence_vector : 3; /* RW */ + unsigned long descriptor_fetch_mode : 1; /* RW */ + unsigned long enable_intd_soft_ack_mode : 1; /* RW */ + unsigned long intd_soft_ack_timeout_period : 4; /* RW */ + unsigned long enable_dual_mapping_mode : 1; /* RW */ + unsigned long vga_io_port_decode_enable : 1; /* RW */ + unsigned long vga_io_port_16_bit_decode : 1; /* RW */ + unsigned long suppress_dest_registration : 1; /* RW */ + unsigned long programmed_initial_priority : 3; /* RW */ + unsigned long use_incoming_priority : 1; /* RW */ + unsigned long enable_programmed_initial_priority : 1; /* RW */ + unsigned long rsvd_29_47 : 19; /* */ + unsigned long fun : 16; /* RW */ + } s; +}; + /* ========================================================================= */ /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ /* ========================================================================= */ @@ -680,334 +753,6 @@ union uvh_lb_bau_sb_descriptor_base_u { } s; }; -/* ========================================================================= */ -/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */ -/* ========================================================================= */ -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL - -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6 -#define 
UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 
29 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42 -#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL - -union uvh_lb_mcast_aoerr0_rpt_enable_u { - unsigned long v; - struct uvh_lb_mcast_aoerr0_rpt_enable_s { - unsigned long mcast_obese_msg : 1; /* RW */ - unsigned long mcast_data_sb_err : 1; /* RW */ - unsigned long mcast_nack_buff_parity : 1; /* RW */ - unsigned long mcast_timeout : 1; /* RW */ - unsigned long mcast_inactive_reply : 1; /* RW */ - unsigned long mcast_upgrade_error : 1; /* RW */ - unsigned long mcast_reg_count_underflow : 1; /* RW */ - unsigned long mcast_rep_obese_msg : 1; /* RW */ - unsigned long ucache_req_runt_msg : 1; /* RW */ - unsigned long ucache_req_obese_msg : 1; /* RW */ - unsigned long ucache_req_data_sb_err : 1; /* RW */ - unsigned long ucache_rep_runt_msg : 1; /* RW */ - unsigned long ucache_rep_obese_msg : 1; /* RW */ - unsigned long ucache_rep_data_sb_err : 1; /* RW */ - unsigned long ucache_rep_command_err : 1; /* RW */ - unsigned long ucache_pend_timeout : 1; /* RW */ - unsigned long macc_req_runt_msg : 1; /* RW */ - unsigned long macc_req_obese_msg : 1; /* RW */ - unsigned long macc_req_data_sb_err : 1; /* RW */ - unsigned long macc_rep_runt_msg : 1; /* RW */ - unsigned long macc_rep_obese_msg : 1; /* RW */ - unsigned long macc_rep_data_sb_err : 1; /* RW */ - unsigned long macc_amo_timeout : 1; /* RW */ - unsigned long macc_put_timeout : 1; /* RW */ - unsigned long 
macc_spurious_event : 1; /* RW */ - unsigned long ioh_destination_table_parity : 1; /* RW */ - unsigned long get_had_error_reply : 1; /* RW */ - unsigned long get_timeout : 1; /* RW */ - unsigned long lock_manager_had_error_reply : 1; /* RW */ - unsigned long put_had_error_reply : 1; /* RW */ - unsigned long put_timeout : 1; /* RW */ - unsigned long sb_activation_overrun : 1; /* RW */ - unsigned long completed_gb_activation_had_error_reply : 1; /* RW */ - unsigned long completed_gb_activation_timeout : 1; /* RW */ - unsigned long descriptor_buffer_0_parity : 1; /* RW */ - unsigned long descriptor_buffer_1_parity : 1; /* RW */ - unsigned long socket_destination_table_parity : 1; /* RW */ - unsigned long bau_reply_payload_corruption : 1; /* RW */ - unsigned long io_port_destination_table_parity : 1; /* RW */ - unsigned long intd_soft_ack_timeout : 1; /* RW */ - unsigned long int_rep_obese_msg : 1; /* RW */ - unsigned long int_rep_command_err : 1; /* RW */ - unsigned long int_timeout : 1; /* RW */ - unsigned long rsvd_43_63 : 21; /* */ - } s; -}; - -/* ========================================================================= */ -/* UVH_LOCAL_INT0_CONFIG */ -/* ========================================================================= */ -#define UVH_LOCAL_INT0_CONFIG 0x61000UL - -#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0 -#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8 -#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11 -#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12 -#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13 -#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15 -#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16 -#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32 -#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL - -union uvh_local_int0_config_u { - unsigned long v; - struct uvh_local_int0_config_s { - unsigned long vector_ : 8; /* RW */ - unsigned long dm : 3; /* RW */ - unsigned long destmode : 1; /* RW */ - unsigned long status : 1; /* RO */ - unsigned long p : 1; /* RO */ - unsigned long rsvd_14 : 1; /* */ - unsigned long t : 1; /* RO */ - unsigned long m : 1; /* RW */ - unsigned long rsvd_17_31: 15; /* */ - unsigned long apic_id : 32; /* RW */ - } s; -}; - -/* ========================================================================= */ -/* UVH_LOCAL_INT0_ENABLE */ -/* ========================================================================= */ -#define UVH_LOCAL_INT0_ENABLE 0x65000UL - -#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0 -#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL -#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1 -#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL -#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2 -#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL -#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3 -#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL -#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4 -#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL -#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5 -#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL -#define 
UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6 -#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL -#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7 -#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL -#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8 -#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL -#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9 -#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL -#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10 -#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL -#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11 -#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL -#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12 -#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL -#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13 -#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL -#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14 -#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL -#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15 -#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL -#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16 -#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL -#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17 -#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL -#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18 -#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL -#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19 -#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL -#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20 -#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL -#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21 -#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL -#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22 -#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35 -#define 
UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38 -#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL -#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39 -#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL -#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40 -#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL -#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41 -#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL -#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42 -#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL -#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43 -#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL -#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44 -#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL - -union uvh_local_int0_enable_u { - unsigned long v; - struct uvh_local_int0_enable_s { - unsigned long lb_hcerr : 1; /* RW */ - unsigned long gr0_hcerr : 1; /* RW */ - unsigned long gr1_hcerr : 1; /* RW */ - unsigned long lh_hcerr : 1; /* RW */ - unsigned long rh_hcerr : 1; /* RW */ - unsigned long xn_hcerr : 1; /* RW */ - unsigned long si_hcerr : 1; /* RW */ - unsigned long lb_aoerr0 : 1; /* RW */ - unsigned long gr0_aoerr0 : 1; /* RW */ - unsigned long gr1_aoerr0 : 1; /* RW */ - unsigned long lh_aoerr0 : 1; /* RW */ - unsigned long rh_aoerr0 : 1; /* RW */ - unsigned long xn_aoerr0 : 1; /* RW */ - unsigned long si_aoerr0 : 1; /* RW */ - unsigned long lb_aoerr1 : 1; /* RW */ - unsigned long gr0_aoerr1 : 1; /* RW */ - unsigned long gr1_aoerr1 : 1; /* RW */ - unsigned long lh_aoerr1 : 1; /* RW */ - unsigned long rh_aoerr1 : 1; /* RW */ - unsigned long xn_aoerr1 : 1; /* RW */ - unsigned long si_aoerr1 : 1; /* RW */ - unsigned long rh_vpi_int : 1; /* RW */ - unsigned long system_shutdown_int : 1; /* RW */ - unsigned long lb_irq_int_0 : 1; /* RW */ - unsigned long lb_irq_int_1 : 1; /* RW */ - unsigned long lb_irq_int_2 : 1; /* RW */ - unsigned long lb_irq_int_3 : 1; /* RW */ - unsigned long lb_irq_int_4 : 1; /* RW */ - unsigned long lb_irq_int_5 : 1; /* RW */ - unsigned long lb_irq_int_6 : 1; /* RW */ - unsigned long lb_irq_int_7 : 1; /* RW */ - unsigned long lb_irq_int_8 : 1; /* RW */ - unsigned long lb_irq_int_9 : 1; /* RW */ - unsigned long lb_irq_int_10 : 1; /* RW */ - unsigned long lb_irq_int_11 : 1; /* RW */ - unsigned long lb_irq_int_12 : 1; /* RW */ - unsigned long lb_irq_int_13 : 1; /* RW */ - unsigned long lb_irq_int_14 : 1; /* RW */ - unsigned long lb_irq_int_15 : 1; /* RW */ - unsigned long l1_nmi_int : 1; /* RW */ - unsigned long stop_clock : 1; /* RW */ - unsigned long asic_to_l1 : 1; /* RW */ - unsigned long l1_to_asic : 1; /* RW */ - unsigned long ltc_int : 1; /* RW */ - unsigned long la_seq_trigger : 1; /* RW */ - unsigned long rsvd_45_63 : 19; /* */ - } s; -}; - /* ========================================================================= */ /* UVH_NODE_ID */ /* ========================================================================= */ @@ -1111,26 +856,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u { } s; }; -/* ========================================================================= */ -/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */ -/* 
========================================================================= */ -#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL - -#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -union uvh_rh_gam_cfg_overlay_config_mmr_u { - unsigned long v; - struct uvh_rh_gam_cfg_overlay_config_mmr_s { - unsigned long rsvd_0_25: 26; /* */ - unsigned long base : 20; /* RW */ - unsigned long rsvd_46_62: 17; /* */ - unsigned long enable : 1; /* RW */ - } s; -}; - /* ========================================================================= */ /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ /* ========================================================================= */ @@ -1262,101 +987,6 @@ union uvh_rtc1_int_config_u { } s; }; -/* ========================================================================= */ -/* UVH_RTC2_INT_CONFIG */ -/* ========================================================================= */ -#define UVH_RTC2_INT_CONFIG 0x61600UL - -#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0 -#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_RTC2_INT_CONFIG_DM_SHFT 8 -#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11 -#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12 -#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_RTC2_INT_CONFIG_P_SHFT 13 -#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_RTC2_INT_CONFIG_T_SHFT 15 -#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_RTC2_INT_CONFIG_M_SHFT 16 -#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32 -#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL - -union uvh_rtc2_int_config_u { - unsigned long v; - struct uvh_rtc2_int_config_s { - unsigned long vector_ : 8; /* RW */ - unsigned long dm : 3; /* RW */ - unsigned long destmode : 1; /* RW */ - unsigned long status : 1; /* RO */ - unsigned long p : 1; /* RO */ - unsigned long rsvd_14 : 1; /* */ - unsigned long t : 1; /* RO */ - unsigned long m : 1; /* RW */ - unsigned long rsvd_17_31: 15; /* */ - unsigned long apic_id : 32; /* RW */ - } s; -}; - -/* ========================================================================= */ -/* UVH_RTC3_INT_CONFIG */ -/* ========================================================================= */ -#define UVH_RTC3_INT_CONFIG 0x61640UL - -#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0 -#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_RTC3_INT_CONFIG_DM_SHFT 8 -#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11 -#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12 -#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_RTC3_INT_CONFIG_P_SHFT 13 -#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_RTC3_INT_CONFIG_T_SHFT 15 -#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_RTC3_INT_CONFIG_M_SHFT 16 -#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32 -#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL - -union uvh_rtc3_int_config_u { - unsigned long v; - struct 
uvh_rtc3_int_config_s { - unsigned long vector_ : 8; /* RW */ - unsigned long dm : 3; /* RW */ - unsigned long destmode : 1; /* RW */ - unsigned long status : 1; /* RO */ - unsigned long p : 1; /* RO */ - unsigned long rsvd_14 : 1; /* */ - unsigned long t : 1; /* RO */ - unsigned long m : 1; /* RW */ - unsigned long rsvd_17_31: 15; /* */ - unsigned long apic_id : 32; /* RW */ - } s; -}; - -/* ========================================================================= */ -/* UVH_RTC_INC_RATIO */ -/* ========================================================================= */ -#define UVH_RTC_INC_RATIO 0x350000UL - -#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0 -#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL -#define UVH_RTC_INC_RATIO_RATIO_SHFT 20 -#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL - -union uvh_rtc_inc_ratio_u { - unsigned long v; - struct uvh_rtc_inc_ratio_s { - unsigned long fraction : 20; /* RW */ - unsigned long ratio : 3; /* RW */ - unsigned long rsvd_23_63: 41; /* */ - } s; -}; - /* ========================================================================= */ /* UVH_SI_ADDR_MAP_CONFIG */ /* ========================================================================= */ diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 364d015efebc..ef68ba48564b 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -20,6 +20,8 @@ #include #include +#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL + static struct bau_control **uv_bau_table_bases __read_mostly; static int uv_bau_retry_limit __read_mostly; @@ -478,16 +480,16 @@ static void uv_enable_timeouts(void) * To program the period, the SOFT_ACK_MODE must be off. */ mmr_image &= ~((unsigned long)1 << - UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); + UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT); uv_write_global_mmr64 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); /* * Set the 4-bit period. */ mmr_image &= ~((unsigned long)0xf << - UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); + UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT); mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD << - UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); + UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT); uv_write_global_mmr64 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); /* @@ -496,7 +498,7 @@ static void uv_enable_timeouts(void) * indicated in bits 2:0 (7 causes all of them to timeout). */ mmr_image |= ((unsigned long)1 << - UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); + UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT); uv_write_global_mmr64 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); } -- cgit v1.2.3-59-g8ed1b From 4bd96a7a8185755b091233b16034c7436cbf57af Mon Sep 17 00:00:00 2001 From: Shane Wang Date: Wed, 10 Mar 2010 14:36:10 +0800 Subject: x86, tboot: Add support for S3 memory integrity protection This patch adds support for S3 memory integrity protection within an Intel(R) TXT launched kernel, for all kernel and userspace memory. All RAM used by the kernel and userspace, as indicated by memory ranges of type E820_RAM and E820_RESERVED_KERN in the e820 table, will be integrity protected. The MAINTAINERS file is also updated to reflect the maintainers of the TXT-related code. All MACing is done in tboot, based on a complexity analysis and tradeoff. v3: Compared with v2, this patch adds a check of array size in tboot.c, and a note to specify which c/s of tboot supports this kind of MACing in intel_txt.txt. 
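For reference, the core of the new logic is a straight walk of the e820 table; this is condensed from the tboot_setup_sleep() hunk in the arch/x86/kernel/tboot.c diff below, so nothing here is new code:

	tboot->num_mac_regions = 0;

	for (i = 0; i < e820.nr_map; i++) {
		/* skip anything the BIOS might alter over the S3 transition */
		if ((e820.map[i].type != E820_RAM)
		    && (e820.map[i].type != E820_RESERVED_KERN))
			continue;

		add_mac_region(e820.map[i].addr, e820.map[i].size);
	}

add_mac_region() rounds the start of each range down to a page boundary and now panics if more than MAX_TB_MAC_REGIONS ranges would be needed, which is the array-size check added in v3.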
Signed-off-by: Shane Wang LKML-Reference: <4B973DDA.6050902@intel.com> Signed-off-by: Joseph Cihula Acked-by: Pavel Machek Acked-by: Rafael J. Wysocki Signed-off-by: H. Peter Anvin --- Documentation/intel_txt.txt | 16 +++++++++------- MAINTAINERS | 11 +++++++++++ arch/x86/include/asm/e820.h | 7 ++++++- arch/x86/kernel/tboot.c | 20 +++++++++++--------- 4 files changed, 37 insertions(+), 17 deletions(-) (limited to 'arch/x86/include') diff --git a/Documentation/intel_txt.txt b/Documentation/intel_txt.txt index f40a1f030019..87c8990dbbd9 100644 --- a/Documentation/intel_txt.txt +++ b/Documentation/intel_txt.txt @@ -161,13 +161,15 @@ o In order to put a system into any of the sleep states after a TXT has been restored, it will restore the TPM PCRs and then transfer control back to the kernel's S3 resume vector. In order to preserve system integrity across S3, the kernel - provides tboot with a set of memory ranges (kernel - code/data/bss, S3 resume code, and AP trampoline) that tboot - will calculate a MAC (message authentication code) over and then - seal with the TPM. On resume and once the measured environment - has been re-established, tboot will re-calculate the MAC and - verify it against the sealed value. Tboot's policy determines - what happens if the verification fails. + provides tboot with a set of memory ranges (RAM and RESERVED_KERN + in the e820 table, but not any memory that BIOS might alter over + the S3 transition) that tboot will calculate a MAC (message + authentication code) over and then seal with the TPM. On resume + and once the measured environment has been re-established, tboot + will re-calculate the MAC and verify it against the sealed value. + Tboot's policy determines what happens if the verification fails. + Note that the c/s 194 of tboot which has the new MAC code supports + this. That's pretty much it for TXT support. 
diff --git a/MAINTAINERS b/MAINTAINERS index 47cc449d89d8..d3072cb8805d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2940,6 +2940,17 @@ S: Odd Fixes F: Documentation/networking/README.ipw2200 F: drivers/net/wireless/ipw2x00/ipw2200.* +INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) +M: Joseph Cihula +M: Shane Wang +L: tboot-devel@lists.sourceforge.net +W: http://tboot.sourceforge.net +T: Mercurial http://www.bughost.org/repos.hg/tboot.hg +S: Supported +F: Documentation/intel_txt.txt +F: include/linux/tboot.h +F: arch/x86/kernel/tboot.c + INTEL WIRELESS WIMAX CONNECTION 2400 M: Inaky Perez-Gonzalez M: linux-wimax@intel.com diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 0e22296790d3..ec8a52d14ab1 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -45,7 +45,12 @@ #define E820_NVS 4 #define E820_UNUSABLE 5 -/* reserved RAM used by kernel itself */ +/* + * reserved RAM used by kernel itself + * if CONFIG_INTEL_TXT is enabled, memory of this type will be + * included in the S3 integrity calculation and so should not include + * any memory that BIOS might alter over the S3 transition + */ #define E820_RESERVED_KERN 128 #ifndef __ASSEMBLY__ diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 86c9f91b48ae..cc2c60474fd0 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -175,6 +175,9 @@ static void add_mac_region(phys_addr_t start, unsigned long size) struct tboot_mac_region *mr; phys_addr_t end = start + size; + if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS) + panic("tboot: Too many MAC regions\n"); + if (start && size) { mr = &tboot->mac_regions[tboot->num_mac_regions++]; mr->start = round_down(start, PAGE_SIZE); @@ -184,18 +187,17 @@ static void add_mac_region(phys_addr_t start, unsigned long size) static int tboot_setup_sleep(void) { + int i; + tboot->num_mac_regions = 0; - /* S3 resume code */ - add_mac_region(acpi_wakeup_address, WAKEUP_SIZE); + for (i = 0; i < e820.nr_map; i++) { + if ((e820.map[i].type != E820_RAM) + && (e820.map[i].type != E820_RESERVED_KERN)) + continue; -#ifdef CONFIG_X86_TRAMPOLINE - /* AP trampoline code */ - add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE); -#endif - - /* kernel code + data + bss */ - add_mac_region(virt_to_phys(_text), _end - _text); + add_mac_region(e820.map[i].addr, e820.map[i].size); + } tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address; -- cgit v1.2.3-59-g8ed1b From d61931d89be506372d01a90d1755f6d0a9fafe2d Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 5 Mar 2010 17:34:46 +0100 Subject: x86: Add optimized popcnt variants Add support for the hardware version of the Hamming weight function, popcnt, present in CPUs which advertize it under CPUID, Function 0x0000_0001_ECX[23]. On CPUs which don't support it, we fallback to the default lib/hweight.c sw versions. A synthetic benchmark comparing popcnt with __sw_hweight64 showed almost a 3x speedup on a F10h machine. Signed-off-by: Borislav Petkov LKML-Reference: <20100318112015.GC11152@aftab> Signed-off-by: H. 
Peter Anvin --- arch/x86/Kconfig | 5 +++ arch/x86/include/asm/alternative.h | 9 +++-- arch/x86/include/asm/arch_hweight.h | 59 +++++++++++++++++++++++++++++++ arch/x86/include/asm/bitops.h | 4 ++- include/asm-generic/bitops/arch_hweight.h | 22 +++++++++--- lib/Makefile | 3 ++ lib/hweight.c | 20 +++++------ scripts/Makefile.lib | 4 +++ 8 files changed, 108 insertions(+), 18 deletions(-) create mode 100644 arch/x86/include/asm/arch_hweight.h (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0eacb1ffb421..89d8c54cdd37 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -238,6 +238,11 @@ config X86_32_LAZY_GS def_bool y depends on X86_32 && !CC_STACKPROTECTOR +config ARCH_HWEIGHT_CFLAGS + string + default "-fcall-saved-ecx -fcall-saved-edx" if X86_32 + default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64 + config KTIME_SCALAR def_bool X86_32 source "init/Kconfig" diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index b09ec55650b3..67dae51e7fd0 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -39,9 +39,6 @@ #define LOCK_PREFIX "" #endif -/* This must be included *after* the definition of LOCK_PREFIX */ -#include - struct alt_instr { u8 *instr; /* original instruction */ u8 *replacement; @@ -95,6 +92,12 @@ static inline int alternatives_text_reserved(void *start, void *end) "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" +/* + * This must be included *after* the definition of ALTERNATIVE due to + * + */ +#include + /* * Alternative instructions for different CPU types or capabilities. * diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h new file mode 100644 index 000000000000..d1fc3c219ae6 --- /dev/null +++ b/arch/x86/include/asm/arch_hweight.h @@ -0,0 +1,59 @@ +#ifndef _ASM_X86_HWEIGHT_H +#define _ASM_X86_HWEIGHT_H + +#ifdef CONFIG_64BIT +/* popcnt %rdi, %rax */ +#define POPCNT ".byte 0xf3,0x48,0x0f,0xb8,0xc7" +#define REG_IN "D" +#define REG_OUT "a" +#else +/* popcnt %eax, %eax */ +#define POPCNT ".byte 0xf3,0x0f,0xb8,0xc0" +#define REG_IN "a" +#define REG_OUT "a" +#endif + +/* + * __sw_hweightXX are called from within the alternatives below + * and callee-clobbered registers need to be taken care of. See + * ARCH_HWEIGHT_CFLAGS in for the respective + * compiler switches. 
+ */ +static inline unsigned int __arch_hweight32(unsigned int w) +{ + unsigned int res = 0; + + asm (ALTERNATIVE("call __sw_hweight32", POPCNT, X86_FEATURE_POPCNT) + : "="REG_OUT (res) + : REG_IN (w)); + + return res; +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight32(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight32(w & 0xff); +} + +static inline unsigned long __arch_hweight64(__u64 w) +{ + unsigned long res = 0; + +#ifdef CONFIG_X86_32 + return __arch_hweight32((u32)w) + + __arch_hweight32((u32)(w >> 32)); +#else + asm (ALTERNATIVE("call __sw_hweight64", POPCNT, X86_FEATURE_POPCNT) + : "="REG_OUT (res) + : REG_IN (w)); +#endif /* CONFIG_X86_32 */ + + return res; +} + +#endif diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 02b47a603fc8..545776efeb16 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -444,7 +444,9 @@ static inline int fls(int x) #define ARCH_HAS_FAST_MULTIPLIER 1 -#include +#include + +#include #endif /* __KERNEL__ */ diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h index 3a7be842cdce..9a81c1e9436c 100644 --- a/include/asm-generic/bitops/arch_hweight.h +++ b/include/asm-generic/bitops/arch_hweight.h @@ -3,9 +3,23 @@ #include -extern unsigned int __arch_hweight32(unsigned int w); -extern unsigned int __arch_hweight16(unsigned int w); -extern unsigned int __arch_hweight8(unsigned int w); -extern unsigned long __arch_hweight64(__u64 w); +inline unsigned int __arch_hweight32(unsigned int w) +{ + return __sw_hweight32(w); +} +inline unsigned int __arch_hweight16(unsigned int w) +{ + return __sw_hweight16(w); +} + +inline unsigned int __arch_hweight8(unsigned int w) +{ + return __sw_hweight8(w); +} + +inline unsigned long __arch_hweight64(__u64 w) +{ + return __sw_hweight64(w); +} #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/lib/Makefile b/lib/Makefile index 2e152aed7198..abe63a8ad143 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -39,7 +39,10 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o + +CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o + obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o diff --git a/lib/hweight.c b/lib/hweight.c index a6927e76840f..3c79d50814cf 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -9,7 +9,7 @@ * The Hamming Weight of a number is the total number of bits set in it. 
*/ -unsigned int __arch_hweight32(unsigned int w) +unsigned int __sw_hweight32(unsigned int w) { #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x55555555; @@ -24,30 +24,30 @@ unsigned int __arch_hweight32(unsigned int w) return (res + (res >> 16)) & 0x000000FF; #endif } -EXPORT_SYMBOL(__arch_hweight32); +EXPORT_SYMBOL(__sw_hweight32); -unsigned int __arch_hweight16(unsigned int w) +unsigned int __sw_hweight16(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x5555); res = (res & 0x3333) + ((res >> 2) & 0x3333); res = (res + (res >> 4)) & 0x0F0F; return (res + (res >> 8)) & 0x00FF; } -EXPORT_SYMBOL(__arch_hweight16); +EXPORT_SYMBOL(__sw_hweight16); -unsigned int __arch_hweight8(unsigned int w) +unsigned int __sw_hweight8(unsigned int w) { unsigned int res = w - ((w >> 1) & 0x55); res = (res & 0x33) + ((res >> 2) & 0x33); return (res + (res >> 4)) & 0x0F; } -EXPORT_SYMBOL(__arch_hweight8); +EXPORT_SYMBOL(__sw_hweight8); -unsigned long __arch_hweight64(__u64 w) +unsigned long __sw_hweight64(__u64 w) { #if BITS_PER_LONG == 32 - return __arch_hweight32((unsigned int)(w >> 32)) + - __arch_hweight32((unsigned int)w); + return __sw_hweight32((unsigned int)(w >> 32)) + + __sw_hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 #ifdef ARCH_HAS_FAST_MULTIPLIER w -= (w >> 1) & 0x5555555555555555ul; @@ -64,4 +64,4 @@ unsigned long __arch_hweight64(__u64 w) #endif #endif } -EXPORT_SYMBOL(__arch_hweight64); +EXPORT_SYMBOL(__sw_hweight64); diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index f9bdf264473d..cbcd654215e6 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -245,3 +245,7 @@ quiet_cmd_lzo = LZO $@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) + +# misc stuff +# --------------------------------------------------------------------------- +quote:=" -- cgit v1.2.3-59-g8ed1b From 5958f1d5d722df7a9e5d129676614a8e5219bacd Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 31 Mar 2010 21:56:41 +0200 Subject: x86, cpu: Add AMD core boosting feature flag to /proc/cpuinfo By semi-popular demand, this adds the Core Performance Boost feature flag to /proc/cpuinfo. Possible use case for this is userspace tools like cpufreq-aperf, for example, so that they don't have to jump through hoops of accessing "/dev/cpu/%d/cpuid" in order to check for CPB hw support, or call cpuid from userspace. Signed-off-by: Borislav Petkov LKML-Reference: <1270065406-1814-2-git-send-email-bp@amd64.org> Reviewed-by: Thomas Renninger Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/cpufeature.h | 1 + arch/x86/kernel/cpu/addon_cpuid_features.c | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0cd82d068613..630e623f61e0 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -161,6 +161,7 @@ */ #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ #define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ +#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */ /* Virtualization flags: Linux defined */ #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 97ad79cdf688..ead2a1cfa570 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -30,8 +30,9 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) const struct cpuid_bit *cb; static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { - { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, - { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, + { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, + { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, + { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007 }, { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a }, { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a }, { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a }, -- cgit v1.2.3-59-g8ed1b From b8f7fb13d2d7ff14818fd1d3edd8b834d38b0217 Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Wed, 14 Apr 2010 11:35:46 -0500 Subject: x86, UV: Improve BAU performance and error recovery - increase performance of the interrupt handler - release timed-out software acknowledge resources - recover from continuous-busy status due to a hardware issue - add a 'throttle' to keep a uvhub from sending more than a specified number of broadcasts concurrently (work around the hardware issue) - provide a 'nobau' boot command line option - rename 'pnode' and 'node' to 'uvhub' (the 'node' terminology is ambiguous) - add some new statistics about the scope of broadcasts, retries, the hardware issue and the 'throttle' - split off new function uv_bau_retry_msg() from uv_bau_process_message() per community coding style feedback. - simplify the argument list to uv_bau_process_message(), per community coding style feedback. Signed-off-by: Cliff Wickman Cc: linux-mm@kvack.org Cc: Jack Steiner Cc: Russ Anderson Cc: Mike Travis Cc: "H. Peter Anvin" Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uv/uv_bau.h | 247 +++++--- arch/x86/kernel/tlb_uv.c | 1270 +++++++++++++++++++++++++++----------- 2 files changed, 1075 insertions(+), 442 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index b414d2b401f6..aa558ac0306e 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -27,13 +27,14 @@ * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on. * * We will use 31 sets, one for sending BAU messages from each of the 32 - * cpu's on the node. + * cpu's on the uvhub. * * TLB shootdown will use the first of the 8 descriptors of each set. * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set). 
*/ #define UV_ITEMS_PER_DESCRIPTOR 8 +#define MAX_BAU_CONCURRENT 3 #define UV_CPUS_PER_ACT_STATUS 32 #define UV_ACT_STATUS_MASK 0x3 #define UV_ACT_STATUS_SIZE 2 @@ -45,6 +46,9 @@ #define UV_PAYLOADQ_PNODE_SHIFT 49 #define UV_PTC_BASENAME "sgi_uv/ptc_statistics" #define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask)) +#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15 +#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16 +#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL /* * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 @@ -55,15 +59,29 @@ #define DESC_STATUS_SOURCE_TIMEOUT 3 /* - * source side thresholds at which message retries print a warning + * source side threshholds at which message retries print a warning */ #define SOURCE_TIMEOUT_LIMIT 20 #define DESTINATION_TIMEOUT_LIMIT 20 +/* + * misc. delays, in microseconds + */ +#define THROTTLE_DELAY 10 +#define TIMEOUT_DELAY 10 +#define BIOS_TO 1000 +/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */ + +/* + * threshholds at which to use IPI to free resources + */ +#define PLUGSB4RESET 100 +#define TIMEOUTSB4RESET 100 + /* * number of entries in the destination side payload queue */ -#define DEST_Q_SIZE 17 +#define DEST_Q_SIZE 20 /* * number of destination side software ack resources */ @@ -72,9 +90,10 @@ /* * completion statuses for sending a TLB flush message */ -#define FLUSH_RETRY 1 -#define FLUSH_GIVEUP 2 -#define FLUSH_COMPLETE 3 +#define FLUSH_RETRY_PLUGGED 1 +#define FLUSH_RETRY_TIMEOUT 2 +#define FLUSH_GIVEUP 3 +#define FLUSH_COMPLETE 4 /* * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) @@ -86,14 +105,14 @@ * 'base_dest_nodeid' field of the header corresponds to the * destination nodeID associated with that specified bit. */ -struct bau_target_nodemask { - unsigned long bits[BITS_TO_LONGS(256)]; +struct bau_target_uvhubmask { + unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)]; }; /* - * mask of cpu's on a node + * mask of cpu's on a uvhub * (during initialization we need to check that unsigned long has - * enough bits for max. cpu's per node) + * enough bits for max. 
cpu's per uvhub) */ struct bau_local_cpumask { unsigned long bits; @@ -135,8 +154,8 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ - /* bits 20:6 */ /* first bit in node_map */ + unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ + /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ /* 0x38: SN3net EndPoint Message */ @@ -146,26 +165,38 @@ struct bau_msg_header { unsigned int rsvd_2:9; /* must be zero */ /* bits 40:32 */ /* Suppl_A is 56-41 */ - unsigned int payload_2a:8;/* becomes byte 16 of msg */ - /* bits 48:41 */ /* not currently using */ - unsigned int payload_2b:8;/* becomes byte 17 of msg */ - /* bits 56:49 */ /* not currently using */ + unsigned int sequence:16;/* message sequence number */ + /* bits 56:41 */ /* becomes bytes 16-17 of msg */ /* Address field (96:57) is never used as an address (these are address bits 42:3) */ + unsigned int rsvd_3:1; /* must be zero */ /* bit 57 */ /* address bits 27:4 are payload */ - /* these 24 bits become bytes 12-14 of msg */ + /* these next 24 (58-81) bits become bytes 12-14 of msg */ + + /* bits 65:58 land in byte 12 */ unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ /* bit 58 */ - - unsigned int payload_1a:5;/* not currently used */ - /* bits 63:59 */ - unsigned int payload_1b:8;/* not currently used */ - /* bits 71:64 */ - unsigned int payload_1c:8;/* not currently used */ - /* bits 79:72 */ - unsigned int payload_1d:2;/* not currently used */ + unsigned int msg_type:3; /* software type of the message*/ + /* bits 61:59 */ + unsigned int canceled:1; /* message canceled, resource to be freed*/ + /* bit 62 */ + unsigned int payload_1a:1;/* not currently used */ + /* bit 63 */ + unsigned int payload_1b:2;/* not currently used */ + /* bits 65:64 */ + + /* bits 73:66 land in byte 13 */ + unsigned int payload_1ca:6;/* not currently used */ + /* bits 71:66 */ + unsigned int payload_1c:2;/* not currently used */ + /* bits 73:72 */ + + /* bits 81:74 land in byte 14 */ + unsigned int payload_1d:6;/* not currently used */ + /* bits 79:74 */ + unsigned int payload_1e:2;/* not currently used */ /* bits 81:80 */ unsigned int rsvd_4:7; /* must be zero */ @@ -178,7 +209,7 @@ struct bau_msg_header { /* bits 95:90 */ unsigned int rsvd_6:5; /* must be zero */ /* bits 100:96 */ - unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */ + unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */ /* bit 101*/ unsigned int fairness:3;/* usually zero */ /* bits 104:102 */ @@ -191,13 +222,18 @@ struct bau_msg_header { /* bits 127:107 */ }; +/* see msg_type: */ +#define MSG_NOOP 0 +#define MSG_REGULAR 1 +#define MSG_RETRY 2 + /* * The activation descriptor: * The format of the message to send, plus all accompanying control * Should be 64 bytes */ struct bau_desc { - struct bau_target_nodemask distribution; + struct bau_target_uvhubmask distribution; /* * message template, consisting of header and payload: */ @@ -237,19 +273,25 @@ struct bau_payload_queue_entry { unsigned short acknowledge_count; /* filled in by destination */ /* 16 bits, bytes 10-11 */ - unsigned short replied_to:1; /* sent as 0 by the source */ - /* 1 bit */ - unsigned short unused1:7; /* not currently using */ - /* 7 bits: byte 12) */ + /* these next 3 bytes come from bits 58-81 of the message header */ + unsigned short replied_to:1; /* sent as 0 
by the source */ + unsigned short msg_type:3; /* software message type */ + unsigned short canceled:1; /* sent as 0 by the source */ + unsigned short unused1:3; /* not currently using */ + /* byte 12 */ - unsigned char unused2[2]; /* not currently using */ - /* bytes 13-14 */ + unsigned char unused2a; /* not currently using */ + /* byte 13 */ + unsigned char unused2; /* not currently using */ + /* byte 14 */ unsigned char sw_ack_vector; /* filled in by the hardware */ /* byte 15 (bits 127:120) */ - unsigned char unused4[3]; /* not currently using bytes 17-19 */ - /* bytes 17-19 */ + unsigned short sequence; /* message sequence number */ + /* bytes 16-17 */ + unsigned char unused4[2]; /* not currently using bytes 18-19 */ + /* bytes 18-19 */ int number_of_cpus; /* filled in at destination */ /* 32 bits, bytes 20-23 (aligned) */ @@ -259,63 +301,93 @@ struct bau_payload_queue_entry { }; /* - * one for every slot in the destination payload queue - */ -struct bau_msg_status { - struct bau_local_cpumask seen_by; /* map of cpu's */ -}; - -/* - * one for every slot in the destination software ack resources - */ -struct bau_sw_ack_status { - struct bau_payload_queue_entry *msg; /* associated message */ - int watcher; /* cpu monitoring, or -1 */ -}; - -/* - * one on every node and per-cpu; to locate the software tables + * one per-cpu; to locate the software tables */ struct bau_control { struct bau_desc *descriptor_base; - struct bau_payload_queue_entry *bau_msg_head; struct bau_payload_queue_entry *va_queue_first; struct bau_payload_queue_entry *va_queue_last; - struct bau_msg_status *msg_statuses; - int *watching; /* pointer to array */ + struct bau_payload_queue_entry *bau_msg_head; + struct bau_control *uvhub_master; + struct bau_control *socket_master; + unsigned long timeout_interval; + atomic_t active_descriptor_count; + int max_concurrent; + int max_concurrent_constant; + int retry_message_scans; + int plugged_tries; + int timeout_tries; + int ipi_attempts; + int conseccompletes; + short cpu; + short uvhub_cpu; + short uvhub; + short cpus_in_socket; + short cpus_in_uvhub; + unsigned short message_number; + unsigned short uvhub_quiesce; + short socket_acknowledge_count[DEST_Q_SIZE]; + cycles_t send_message; + spinlock_t masks_lock; + spinlock_t uvhub_lock; + spinlock_t queue_lock; }; /* * This structure is allocated per_cpu for UV TLB shootdown statistics. 
*/ struct ptc_stats { - unsigned long ptc_i; /* number of IPI-style flushes */ - unsigned long requestor; /* number of nodes this cpu sent to */ - unsigned long requestee; /* times cpu was remotely requested */ - unsigned long alltlb; /* times all tlb's on this cpu were flushed */ - unsigned long onetlb; /* times just one tlb on this cpu was flushed */ - unsigned long s_retry; /* retries on source side timeouts */ - unsigned long d_retry; /* retries on destination side timeouts */ - unsigned long sflush; /* cycles spent in uv_flush_tlb_others */ - unsigned long dflush; /* cycles spent on destination side */ - unsigned long retriesok; /* successes on retries */ - unsigned long nomsg; /* interrupts with no message */ - unsigned long multmsg; /* interrupts with multiple messages */ - unsigned long ntargeted;/* nodes targeted */ + /* sender statistics */ + unsigned long s_giveup; /* number of fall backs to IPI-style flushes */ + unsigned long s_requestor; /* number of shootdown requests */ + unsigned long s_stimeout; /* source side timeouts */ + unsigned long s_dtimeout; /* destination side timeouts */ + unsigned long s_time; /* time spent in sending side */ + unsigned long s_retriesok; /* successful retries */ + unsigned long s_ntargcpu; /* number of cpus targeted */ + unsigned long s_ntarguvhub; /* number of uvhubs targeted */ + unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */ + unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */ + unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */ + unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */ + unsigned long s_ntarguvhub1; /* number of times == 1 target hub */ + unsigned long s_resets_plug; /* ipi-style resets from plug state */ + unsigned long s_resets_timeout; /* ipi-style resets from timeouts */ + unsigned long s_busy; /* status stayed busy past s/w timer */ + unsigned long s_throttles; /* waits in throttle */ + unsigned long s_retry_messages; /* retry broadcasts */ + /* destination statistics */ + unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */ + unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */ + unsigned long d_multmsg; /* interrupts with multiple messages */ + unsigned long d_nomsg; /* interrupts with no message */ + unsigned long d_time; /* time spent on destination side */ + unsigned long d_requestee; /* number of messages processed */ + unsigned long d_retries; /* number of retry messages processed */ + unsigned long d_canceled; /* number of messages canceled by retries */ + unsigned long d_nocanceled; /* retries that found nothing to cancel */ + unsigned long d_resets; /* number of ipi-style requests processed */ + unsigned long d_rcanceled; /* number of messages canceled by resets */ }; -static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp) +static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) { - return constant_test_bit(node, &dstp->bits[0]); + return constant_test_bit(uvhub, &dstp->bits[0]); } -static inline void bau_node_set(int node, struct bau_target_nodemask *dstp) +static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) { - __set_bit(node, &dstp->bits[0]); + __set_bit(uvhub, &dstp->bits[0]); } -static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits) +static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, + int nbits) { bitmap_zero(&dstp->bits[0], nbits); } +static inline int bau_uvhub_weight(struct 
bau_target_uvhubmask *dstp) +{ + return bitmap_weight((unsigned long *)&dstp->bits[0], + UV_DISTRIBUTION_SIZE); +} static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) { @@ -328,4 +400,35 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) extern void uv_bau_message_intr1(void); extern void uv_bau_timeout_intr1(void); +struct atomic_short { + short counter; +}; + +/** + * atomic_read_short - read a short atomic variable + * @v: pointer of type atomic_short + * + * Atomically reads the value of @v. + */ +static inline int atomic_read_short(const struct atomic_short *v) +{ + return v->counter; +} + +/** + * atomic_add_short_return - add and return a short int + * @i: short value to add + * @v: pointer of type atomic_short + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline int atomic_add_short_return(short i, struct atomic_short *v) +{ + short __i = i; + asm volatile(LOCK_PREFIX "xaddw %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + #endif /* _ASM_X86_UV_UV_BAU_H */ diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index ef68ba48564b..414f7c4fe76c 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -1,7 +1,7 @@ /* * SGI UltraViolet TLB flush routines. * - * (c) 2008 Cliff Wickman , SGI. + * (c) 2008-2010 Cliff Wickman , SGI. * * This code is released under the GNU General Public License version 2 or * later. @@ -19,44 +19,67 @@ #include #include #include +#include + +struct msg_desc { + struct bau_payload_queue_entry *msg; + int msg_slot; + int sw_ack_slot; + struct bau_payload_queue_entry *va_queue_first; + struct bau_payload_queue_entry *va_queue_last; +}; #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL -static struct bau_control **uv_bau_table_bases __read_mostly; -static int uv_bau_retry_limit __read_mostly; +static int uv_bau_max_concurrent __read_mostly; -/* base pnode in this partition */ -static int uv_partition_base_pnode __read_mostly; +static int nobau; +static int __init setup_nobau(char *arg) +{ + nobau = 1; + return 0; +} +early_param("nobau", setup_nobau); -static unsigned long uv_mmask __read_mostly; +/* base pnode in this partition */ +static int uv_partition_base_pnode __read_mostly; +/* position of pnode (which is nasid>>1): */ +static int uv_nshift __read_mostly; +static unsigned long uv_mmask __read_mostly; static DEFINE_PER_CPU(struct ptc_stats, ptcstats); static DEFINE_PER_CPU(struct bau_control, bau_control); +static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); + +struct reset_args { + int sender; +}; /* - * Determine the first node on a blade. + * Determine the first node on a uvhub. 'Nodes' are used for kernel + * memory allocation. */ -static int __init blade_to_first_node(int blade) +static int __init uvhub_to_first_node(int uvhub) { int node, b; for_each_online_node(node) { b = uv_node_to_blade_id(node); - if (blade == b) + if (uvhub == b) return node; } - return -1; /* shouldn't happen */ + return -1; } /* - * Determine the apicid of the first cpu on a blade. + * Determine the apicid of the first cpu on a uvhub. 
*/ -static int __init blade_to_first_apicid(int blade) +static int __init uvhub_to_first_apicid(int uvhub) { int cpu; for_each_present_cpu(cpu) - if (blade == uv_cpu_to_blade_id(cpu)) + if (uvhub == uv_cpu_to_blade_id(cpu)) return per_cpu(x86_cpu_to_apicid, cpu); return -1; } @@ -69,195 +92,459 @@ static int __init blade_to_first_apicid(int blade) * clear of the Timeout bit (as well) will free the resource. No reply will * be sent (the hardware will only do one reply per message). */ -static void uv_reply_to_message(int resource, - struct bau_payload_queue_entry *msg, - struct bau_msg_status *msp) +static inline void uv_reply_to_message(struct msg_desc *mdp, + struct bau_control *bcp) { unsigned long dw; + struct bau_payload_queue_entry *msg; - dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource); + msg = mdp->msg; + if (!msg->canceled) { + dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) | + msg->sw_ack_vector; + uv_write_local_mmr( + UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw); + } msg->replied_to = 1; msg->sw_ack_vector = 0; - if (msp) - msp->seen_by.bits = 0; - uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw); } /* - * Do all the things a cpu should do for a TLB shootdown message. - * Other cpu's may come here at the same time for this message. + * Process the receipt of a RETRY message */ -static void uv_bau_process_message(struct bau_payload_queue_entry *msg, - int msg_slot, int sw_ack_slot) +static inline void uv_bau_process_retry_msg(struct msg_desc *mdp, + struct bau_control *bcp) { - unsigned long this_cpu_mask; - struct bau_msg_status *msp; - int cpu; + int i; + int cancel_count = 0; + int slot2; + unsigned long msg_res; + unsigned long mmr = 0; + struct bau_payload_queue_entry *msg; + struct bau_payload_queue_entry *msg2; + struct ptc_stats *stat; - msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; - cpu = uv_blade_processor_id(); - msg->number_of_cpus = - uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); - this_cpu_mask = 1UL << cpu; - if (msp->seen_by.bits & this_cpu_mask) - return; - atomic_or_long(&msp->seen_by.bits, this_cpu_mask); + msg = mdp->msg; + stat = &per_cpu(ptcstats, bcp->cpu); + stat->d_retries++; + /* + * cancel any message from msg+1 to the retry itself + */ + for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) { + if (msg2 > mdp->va_queue_last) + msg2 = mdp->va_queue_first; + if (msg2 == msg) + break; + + /* same conditions for cancellation as uv_do_reset */ + if ((msg2->replied_to == 0) && (msg2->canceled == 0) && + (msg2->sw_ack_vector) && ((msg2->sw_ack_vector & + msg->sw_ack_vector) == 0) && + (msg2->sending_cpu == msg->sending_cpu) && + (msg2->msg_type != MSG_NOOP)) { + slot2 = msg2 - mdp->va_queue_first; + mmr = uv_read_local_mmr + (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); + msg_res = ((msg2->sw_ack_vector << 8) | + msg2->sw_ack_vector); + /* + * This is a message retry; clear the resources held + * by the previous message only if they timed out. + * If it has not timed out we have an unexpected + * situation to report. + */ + if (mmr & (msg_res << 8)) { + /* + * is the resource timed out? + * make everyone ignore the cancelled message. 
+ */ + msg2->canceled = 1; + stat->d_canceled++; + cancel_count++; + uv_write_local_mmr( + UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, + (msg_res << 8) | msg_res); + } else + printk(KERN_INFO "note bau retry: no effect\n"); + } + } + if (!cancel_count) + stat->d_nocanceled++; +} - if (msg->replied_to == 1) - return; +/* + * Do all the things a cpu should do for a TLB shootdown message. + * Other cpu's may come here at the same time for this message. + */ +static void uv_bau_process_message(struct msg_desc *mdp, + struct bau_control *bcp) +{ + int msg_ack_count; + short socket_ack_count = 0; + struct ptc_stats *stat; + struct bau_payload_queue_entry *msg; + struct bau_control *smaster = bcp->socket_master; + /* + * This must be a normal message, or retry of a normal message + */ + msg = mdp->msg; + stat = &per_cpu(ptcstats, bcp->cpu); if (msg->address == TLB_FLUSH_ALL) { local_flush_tlb(); - __get_cpu_var(ptcstats).alltlb++; + stat->d_alltlb++; } else { __flush_tlb_one(msg->address); - __get_cpu_var(ptcstats).onetlb++; + stat->d_onetlb++; } + stat->d_requestee++; + + /* + * One cpu on each uvhub has the additional job on a RETRY + * of releasing the resource held by the message that is + * being retried. That message is identified by sending + * cpu number. + */ + if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master) + uv_bau_process_retry_msg(mdp, bcp); - __get_cpu_var(ptcstats).requestee++; + /* + * This is a sw_ack message, so we have to reply to it. + * Count each responding cpu on the socket. This avoids + * pinging the count's cache line back and forth between + * the sockets. + */ + socket_ack_count = atomic_add_short_return(1, (struct atomic_short *) + &smaster->socket_acknowledge_count[mdp->msg_slot]); + if (socket_ack_count == bcp->cpus_in_socket) { + /* + * Both sockets dump their completed count total into + * the message's count. + */ + smaster->socket_acknowledge_count[mdp->msg_slot] = 0; + msg_ack_count = atomic_add_short_return(socket_ack_count, + (struct atomic_short *)&msg->acknowledge_count); + + if (msg_ack_count == bcp->cpus_in_uvhub) { + /* + * All cpus in uvhub saw it; reply + */ + uv_reply_to_message(mdp, bcp); + } + } - atomic_inc_short(&msg->acknowledge_count); - if (msg->number_of_cpus == msg->acknowledge_count) - uv_reply_to_message(sw_ack_slot, msg, msp); + return; } /* - * Examine the payload queue on one distribution node to see - * which messages have not been seen, and which cpu(s) have not seen them. + * Determine the first cpu on a uvhub. + */ +static int uvhub_to_first_cpu(int uvhub) +{ + int cpu; + for_each_present_cpu(cpu) + if (uvhub == uv_cpu_to_blade_id(cpu)) + return cpu; + return -1; +} + +/* + * Last resort when we get a large number of destination timeouts is + * to clear resources held by a given cpu. + * Do this with IPI so that all messages in the BAU message queue + * can be identified by their nonzero sw_ack_vector field. * - * Returns the number of cpu's that have not responded. + * This is entered for a single cpu on the uvhub. + * The sender wants this uvhub to free a specific message's + * sw_ack resources. 
*/ -static int uv_examine_destination(struct bau_control *bau_tablesp, int sender) +static void +uv_do_reset(void *ptr) { - struct bau_payload_queue_entry *msg; - struct bau_msg_status *msp; - int count = 0; int i; - int j; + int slot; + int count = 0; + unsigned long mmr; + unsigned long msg_res; + struct bau_control *bcp; + struct reset_args *rap; + struct bau_payload_queue_entry *msg; + struct ptc_stats *stat; - for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE; - msg++, i++) { - if ((msg->sending_cpu == sender) && (!msg->replied_to)) { - msp = bau_tablesp->msg_statuses + i; - printk(KERN_DEBUG - "blade %d: address:%#lx %d of %d, not cpu(s): ", - i, msg->address, msg->acknowledge_count, - msg->number_of_cpus); - for (j = 0; j < msg->number_of_cpus; j++) { - if (!((1L << j) & msp->seen_by.bits)) { - count++; - printk("%d ", j); - } + bcp = &per_cpu(bau_control, smp_processor_id()); + rap = (struct reset_args *)ptr; + stat = &per_cpu(ptcstats, bcp->cpu); + stat->d_resets++; + + /* + * We're looking for the given sender, and + * will free its sw_ack resource. + * If all cpu's finally responded after the timeout, its + * message 'replied_to' was set. + */ + for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) { + /* uv_do_reset: same conditions for cancellation as + uv_bau_process_retry_msg() */ + if ((msg->replied_to == 0) && + (msg->canceled == 0) && + (msg->sending_cpu == rap->sender) && + (msg->sw_ack_vector) && + (msg->msg_type != MSG_NOOP)) { + /* + * make everyone else ignore this message + */ + msg->canceled = 1; + slot = msg - bcp->va_queue_first; + count++; + /* + * only reset the resource if it is still pending + */ + mmr = uv_read_local_mmr + (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); + msg_res = ((msg->sw_ack_vector << 8) | + msg->sw_ack_vector); + if (mmr & msg_res) { + stat->d_rcanceled++; + uv_write_local_mmr( + UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, + msg_res); } - printk("\n"); } } - return count; + return; } /* - * Examine the payload queue on all the distribution nodes to see - * which messages have not been seen, and which cpu(s) have not seen them. - * - * Returns the number of cpu's that have not responded. + * Use IPI to get all target uvhubs to release resources held by + * a given sending cpu number. 
*/ -static int uv_examine_destinations(struct bau_target_nodemask *distribution) +static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution, + int sender) { - int sender; - int i; - int count = 0; + int uvhub; + int cpu; + cpumask_t mask; + struct reset_args reset_args; - sender = smp_processor_id(); - for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) { - if (!bau_node_isset(i, distribution)) + reset_args.sender = sender; + + cpus_clear(mask); + /* find a single cpu for each uvhub in this distribution mask */ + for (uvhub = 0; + uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE; + uvhub++) { + if (!bau_uvhub_isset(uvhub, distribution)) continue; - count += uv_examine_destination(uv_bau_table_bases[i], sender); + /* find a cpu for this uvhub */ + cpu = uvhub_to_first_cpu(uvhub); + cpu_set(cpu, mask); } - return count; + /* IPI all cpus; Preemption is already disabled */ + smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1); + return; +} + +static inline unsigned long +cycles_2_us(unsigned long long cyc) +{ + unsigned long long ns; + unsigned long us; + ns = (cyc * per_cpu(cyc2ns, smp_processor_id())) + >> CYC2NS_SCALE_FACTOR; + us = ns / 1000; + return us; } /* - * wait for completion of a broadcast message - * - * return COMPLETE, RETRY or GIVEUP + * wait for all cpus on this hub to finish their sends and go quiet + * leaves uvhub_quiesce set so that no new broadcasts are started by + * bau_flush_send_and_wait() + */ +static inline void +quiesce_local_uvhub(struct bau_control *hmaster) +{ + atomic_add_short_return(1, (struct atomic_short *) + &hmaster->uvhub_quiesce); +} + +/* + * mark this quiet-requestor as done + */ +static inline void +end_uvhub_quiesce(struct bau_control *hmaster) +{ + atomic_add_short_return(-1, (struct atomic_short *) + &hmaster->uvhub_quiesce); +} + +/* + * Wait for completion of a broadcast software ack message + * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP */ static int uv_wait_completion(struct bau_desc *bau_desc, - unsigned long mmr_offset, int right_shift) + unsigned long mmr_offset, int right_shift, int this_cpu, + struct bau_control *bcp, struct bau_control *smaster, long try) { - int exams = 0; - long destination_timeouts = 0; - long source_timeouts = 0; + int relaxes = 0; unsigned long descriptor_status; + unsigned long mmr; + unsigned long mask; + cycles_t ttime; + cycles_t timeout_time; + struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu); + struct bau_control *hmaster; + hmaster = bcp->uvhub_master; + timeout_time = get_cycles() + bcp->timeout_interval; + + /* spin on the status MMR, waiting for it to go idle */ while ((descriptor_status = (((unsigned long) uv_read_local_mmr(mmr_offset) >> right_shift) & UV_ACT_STATUS_MASK)) != DESC_STATUS_IDLE) { - if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) { - source_timeouts++; - if (source_timeouts > SOURCE_TIMEOUT_LIMIT) - source_timeouts = 0; - __get_cpu_var(ptcstats).s_retry++; - return FLUSH_RETRY; - } /* - * spin here looking for progress at the destinations + * Our software ack messages may be blocked because there are + * no swack resources available. As long as none of them + * has timed out hardware will NACK our message and its + * state will stay IDLE. 
*/ - if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) { - destination_timeouts++; - if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) { - /* - * returns number of cpus not responding - */ - if (uv_examine_destinations - (&bau_desc->distribution) == 0) { - __get_cpu_var(ptcstats).d_retry++; - return FLUSH_RETRY; - } - exams++; - if (exams >= uv_bau_retry_limit) { - printk(KERN_DEBUG - "uv_flush_tlb_others"); - printk("giving up on cpu %d\n", - smp_processor_id()); + if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) { + stat->s_stimeout++; + return FLUSH_GIVEUP; + } else if (descriptor_status == + DESC_STATUS_DESTINATION_TIMEOUT) { + stat->s_dtimeout++; + ttime = get_cycles(); + + /* + * Our retries may be blocked by all destination + * swack resources being consumed, and a timeout + * pending. In that case hardware returns the + * ERROR that looks like a destination timeout. + */ + if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) { + bcp->conseccompletes = 0; + return FLUSH_RETRY_PLUGGED; + } + + bcp->conseccompletes = 0; + return FLUSH_RETRY_TIMEOUT; + } else { + /* + * descriptor_status is still BUSY + */ + cpu_relax(); + relaxes++; + if (relaxes >= 10000) { + relaxes = 0; + if (get_cycles() > timeout_time) { + quiesce_local_uvhub(hmaster); + + /* single-thread the register change */ + spin_lock(&hmaster->masks_lock); + mmr = uv_read_local_mmr(mmr_offset); + mask = 0UL; + mask |= (3UL << right_shift); + mask = ~mask; + mmr &= mask; + uv_write_local_mmr(mmr_offset, mmr); + spin_unlock(&hmaster->masks_lock); + end_uvhub_quiesce(hmaster); + stat->s_busy++; return FLUSH_GIVEUP; } - /* - * delays can hang the simulator - udelay(1000); - */ - destination_timeouts = 0; } } - cpu_relax(); } + bcp->conseccompletes++; return FLUSH_COMPLETE; } +static inline cycles_t +sec_2_cycles(unsigned long sec) +{ + unsigned long ns; + cycles_t cyc; + + ns = sec * 1000000000; + cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); + return cyc; +} + +/* + * conditionally add 1 to *v, unless *v is >= u + * return 0 if we cannot add 1 to *v because it is >= u + * return 1 if we can add 1 to *v because it is < u + * the add is atomic + * + * This is close to atomic_add_unless(), but this allows the 'u' value + * to be lowered below the current 'v'. atomic_add_unless can only stop + * on equal. + */ +static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) +{ + spin_lock(lock); + if (atomic_read(v) >= u) { + spin_unlock(lock); + return 0; + } + atomic_inc(v); + spin_unlock(lock); + return 1; +} + /** * uv_flush_send_and_wait * - * Send a broadcast and wait for a broadcast message to complete. + * Send a broadcast and wait for it to complete. * - * The flush_mask contains the cpus the broadcast was sent to. + * The flush_mask contains the cpus the broadcast is to be sent to, plus + * cpus that are on the local uvhub. * - * Returns NULL if all remote flushing was done. The mask is zeroed. + * Returns NULL if all flushing represented in the mask was done. The mask + * is zeroed. * Returns @flush_mask if some remote flushing remains to be done. The - * mask will have some bits still set. + * mask will have some bits still set, representing any cpus on the local + * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed. 
*/ -const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, - struct bau_desc *bau_desc, - struct cpumask *flush_mask) +const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc, + struct cpumask *flush_mask, + struct bau_control *bcp) { - int completion_status = 0; int right_shift; - int tries = 0; - int pnode; + int uvhub; int bit; + int completion_status = 0; + int seq_number = 0; + long try = 0; + int cpu = bcp->uvhub_cpu; + int this_cpu = bcp->cpu; + int this_uvhub = bcp->uvhub; unsigned long mmr_offset; unsigned long index; cycles_t time1; cycles_t time2; + struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu); + struct bau_control *smaster = bcp->socket_master; + struct bau_control *hmaster = bcp->uvhub_master; + + /* + * Spin here while there are hmaster->max_concurrent or more active + * descriptors. This is the per-uvhub 'throttle'. + */ + if (!atomic_inc_unless_ge(&hmaster->uvhub_lock, + &hmaster->active_descriptor_count, + hmaster->max_concurrent)) { + stat->s_throttles++; + do { + cpu_relax(); + } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock, + &hmaster->active_descriptor_count, + hmaster->max_concurrent)); + } + + while (hmaster->uvhub_quiesce) + cpu_relax(); if (cpu < UV_CPUS_PER_ACT_STATUS) { mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; @@ -269,24 +556,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, } time1 = get_cycles(); do { - tries++; + /* + * Every message from any given cpu gets a unique message + * sequence number. But retries use that same number. + * Our message may have timed out at the destination because + * all sw-ack resources are in use and there is a timeout + * pending there. In that case, our last send never got + * placed into the queue and we need to persist until it + * does. + * + * Make any retry a type MSG_RETRY so that the destination will + * free any resource held by a previous message from this cpu. + */ + if (try == 0) { + /* use message type set by the caller the first time */ + seq_number = bcp->message_number++; + } else { + /* use RETRY type on all the rest; same sequence */ + bau_desc->header.msg_type = MSG_RETRY; + stat->s_retry_messages++; + } + bau_desc->header.sequence = seq_number; index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | - cpu; + bcp->uvhub_cpu; + bcp->send_message = get_cycles(); + uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); + + try++; completion_status = uv_wait_completion(bau_desc, mmr_offset, - right_shift); - } while (completion_status == FLUSH_RETRY); + right_shift, this_cpu, bcp, smaster, try); + + if (completion_status == FLUSH_RETRY_PLUGGED) { + /* + * Our retries may be blocked by all destination swack + * resources being consumed, and a timeout pending. In + * that case hardware immediately returns the ERROR + * that looks like a destination timeout. 
+ */ + udelay(TIMEOUT_DELAY); + bcp->plugged_tries++; + if (bcp->plugged_tries >= PLUGSB4RESET) { + bcp->plugged_tries = 0; + quiesce_local_uvhub(hmaster); + spin_lock(&hmaster->queue_lock); + uv_reset_with_ipi(&bau_desc->distribution, + this_cpu); + spin_unlock(&hmaster->queue_lock); + end_uvhub_quiesce(hmaster); + bcp->ipi_attempts++; + stat->s_resets_plug++; + } + } else if (completion_status == FLUSH_RETRY_TIMEOUT) { + hmaster->max_concurrent = 1; + bcp->timeout_tries++; + udelay(TIMEOUT_DELAY); + if (bcp->timeout_tries >= TIMEOUTSB4RESET) { + bcp->timeout_tries = 0; + quiesce_local_uvhub(hmaster); + spin_lock(&hmaster->queue_lock); + uv_reset_with_ipi(&bau_desc->distribution, + this_cpu); + spin_unlock(&hmaster->queue_lock); + end_uvhub_quiesce(hmaster); + bcp->ipi_attempts++; + stat->s_resets_timeout++; + } + } + if (bcp->ipi_attempts >= 3) { + bcp->ipi_attempts = 0; + completion_status = FLUSH_GIVEUP; + break; + } + cpu_relax(); + } while ((completion_status == FLUSH_RETRY_PLUGGED) || + (completion_status == FLUSH_RETRY_TIMEOUT)); time2 = get_cycles(); - __get_cpu_var(ptcstats).sflush += (time2 - time1); - if (tries > 1) - __get_cpu_var(ptcstats).retriesok++; - if (completion_status == FLUSH_GIVEUP) { + if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5) + && (hmaster->max_concurrent < hmaster->max_concurrent_constant)) + hmaster->max_concurrent++; + + /* + * hold any cpu not timing out here; no other cpu currently held by + * the 'throttle' should enter the activation code + */ + while (hmaster->uvhub_quiesce) + cpu_relax(); + atomic_dec(&hmaster->active_descriptor_count); + + /* guard against cycles wrap */ + if (time2 > time1) + stat->s_time += (time2 - time1); + else + stat->s_requestor--; /* don't count this one */ + if (completion_status == FLUSH_COMPLETE && try > 1) + stat->s_retriesok++; + else if (completion_status == FLUSH_GIVEUP) { /* * Cause the caller to do an IPI-style TLB shootdown on - * the cpu's, all of which are still in the mask. + * the target cpu's, all of which are still in the mask. */ - __get_cpu_var(ptcstats).ptc_i++; + stat->s_giveup++; return flush_mask; } @@ -295,18 +666,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, * use the IPI method of shootdown on them. */ for_each_cpu(bit, flush_mask) { - pnode = uv_cpu_to_pnode(bit); - if (pnode == this_pnode) + uvhub = uv_cpu_to_blade_id(bit); + if (uvhub == this_uvhub) continue; cpumask_clear_cpu(bit, flush_mask); } if (!cpumask_empty(flush_mask)) return flush_mask; + return NULL; } -static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); - /** * uv_flush_tlb_others - globally purge translation cache of a virtual * address or all TLB's @@ -323,8 +693,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); * The caller has derived the cpumask from the mm_struct. This function * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) * - * The cpumask is converted into a nodemask of the nodes containing - * the cpus. + * The cpumask is converted into a uvhubmask of the uvhubs containing + * those cpus. * * Note that this function should be called with preemption disabled. 
* @@ -336,52 +706,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) { - struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); - int i; - int bit; - int pnode; - int uv_cpu; - int this_pnode; + int remotes; + int tcpu; + int uvhub; int locals = 0; struct bau_desc *bau_desc; + struct cpumask *flush_mask; + struct ptc_stats *stat; + struct bau_control *bcp; - cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); + if (nobau) + return cpumask; - uv_cpu = uv_blade_processor_id(); - this_pnode = uv_hub_info->pnode; - bau_desc = __get_cpu_var(bau_control).descriptor_base; - bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; + bcp = &per_cpu(bau_control, cpu); + /* + * Each sending cpu has a per-cpu mask which it fills from the caller's + * cpu mask. Only remote cpus are converted to uvhubs and copied. + */ + flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); + /* + * copy cpumask to flush_mask, removing current cpu + * (current cpu should already have been flushed by the caller and + * should never be returned if we return flush_mask) + */ + cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); + if (cpu_isset(cpu, *cpumask)) + locals++; /* current cpu was targeted */ - bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); + bau_desc = bcp->descriptor_base; + bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; - i = 0; - for_each_cpu(bit, flush_mask) { - pnode = uv_cpu_to_pnode(bit); - BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1)); - if (pnode == this_pnode) { + bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); + remotes = 0; + for_each_cpu(tcpu, flush_mask) { + uvhub = uv_cpu_to_blade_id(tcpu); + if (uvhub == bcp->uvhub) { locals++; continue; } - bau_node_set(pnode - uv_partition_base_pnode, - &bau_desc->distribution); - i++; + bau_uvhub_set(uvhub, &bau_desc->distribution); + remotes++; } - if (i == 0) { + if (remotes == 0) { /* - * no off_node flushing; return status for local node + * No off_hub flushing; return status for local hub. + * Return the caller's mask if all were local (the current + * cpu may be in that mask). */ if (locals) - return flush_mask; + return cpumask; else return NULL; } - __get_cpu_var(ptcstats).requestor++; - __get_cpu_var(ptcstats).ntargeted += i; + stat = &per_cpu(ptcstats, cpu); + stat->s_requestor++; + stat->s_ntargcpu += remotes; + remotes = bau_uvhub_weight(&bau_desc->distribution); + stat->s_ntarguvhub += remotes; + if (remotes >= 16) + stat->s_ntarguvhub16++; + else if (remotes >= 8) + stat->s_ntarguvhub8++; + else if (remotes >= 4) + stat->s_ntarguvhub4++; + else if (remotes >= 2) + stat->s_ntarguvhub2++; + else + stat->s_ntarguvhub1++; bau_desc->payload.address = va; bau_desc->payload.sending_cpu = cpu; - return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask); + /* + * uv_flush_send_and_wait returns null if all cpu's were messaged, or + * the adjusted flush_mask if any cpu's were not messaged. + */ + return uv_flush_send_and_wait(bau_desc, flush_mask, bcp); } /* @@ -390,87 +790,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, * * We received a broadcast assist message. * - * Interrupts may have been disabled; this interrupt could represent + * Interrupts are disabled; this interrupt could represent * the receipt of several messages. * - * All cores/threads on this node get this interrupt. - * The last one to see it does the s/w ack. + * All cores/threads on this hub get this interrupt. 
+ * The last one to see it does the software ack. * (the resource will not be freed until noninterruptable cpus see this - * interrupt; hardware will timeout the s/w ack and reply ERROR) + * interrupt; hardware may timeout the s/w ack and reply ERROR) */ void uv_bau_message_interrupt(struct pt_regs *regs) { - struct bau_payload_queue_entry *va_queue_first; - struct bau_payload_queue_entry *va_queue_last; - struct bau_payload_queue_entry *msg; - struct pt_regs *old_regs = set_irq_regs(regs); - cycles_t time1; - cycles_t time2; - int msg_slot; - int sw_ack_slot; - int fw; int count = 0; - unsigned long local_pnode; - - ack_APIC_irq(); - exit_idle(); - irq_enter(); - - time1 = get_cycles(); - - local_pnode = uv_blade_to_pnode(uv_numa_blade_id()); - - va_queue_first = __get_cpu_var(bau_control).va_queue_first; - va_queue_last = __get_cpu_var(bau_control).va_queue_last; - - msg = __get_cpu_var(bau_control).bau_msg_head; + cycles_t time_start; + struct bau_payload_queue_entry *msg; + struct bau_control *bcp; + struct ptc_stats *stat; + struct msg_desc msgdesc; + + time_start = get_cycles(); + bcp = &per_cpu(bau_control, smp_processor_id()); + stat = &per_cpu(ptcstats, smp_processor_id()); + msgdesc.va_queue_first = bcp->va_queue_first; + msgdesc.va_queue_last = bcp->va_queue_last; + msg = bcp->bau_msg_head; while (msg->sw_ack_vector) { count++; - fw = msg->sw_ack_vector; - msg_slot = msg - va_queue_first; - sw_ack_slot = ffs(fw) - 1; - - uv_bau_process_message(msg, msg_slot, sw_ack_slot); - + msgdesc.msg_slot = msg - msgdesc.va_queue_first; + msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1; + msgdesc.msg = msg; + uv_bau_process_message(&msgdesc, bcp); msg++; - if (msg > va_queue_last) - msg = va_queue_first; - __get_cpu_var(bau_control).bau_msg_head = msg; + if (msg > msgdesc.va_queue_last) + msg = msgdesc.va_queue_first; + bcp->bau_msg_head = msg; } + stat->d_time += (get_cycles() - time_start); if (!count) - __get_cpu_var(ptcstats).nomsg++; + stat->d_nomsg++; else if (count > 1) - __get_cpu_var(ptcstats).multmsg++; - - time2 = get_cycles(); - __get_cpu_var(ptcstats).dflush += (time2 - time1); - - irq_exit(); - set_irq_regs(old_regs); + stat->d_multmsg++; + ack_APIC_irq(); } /* * uv_enable_timeouts * - * Each target blade (i.e. blades that have cpu's) needs to have + * Each target uvhub (i.e. a uvhub that has cpu's) needs to have * shootdown message timeouts enabled. The timeout does not cause * an interrupt, but causes an error message to be returned to * the sender. */ static void uv_enable_timeouts(void) { - int blade; - int nblades; + int uvhub; + int nuvhubs; int pnode; unsigned long mmr_image; - nblades = uv_num_possible_blades(); + nuvhubs = uv_num_possible_blades(); - for (blade = 0; blade < nblades; blade++) { - if (!uv_blade_nr_possible_cpus(blade)) + for (uvhub = 0; uvhub < nuvhubs; uvhub++) { + if (!uv_blade_nr_possible_cpus(uvhub)) continue; - pnode = uv_blade_to_pnode(blade); + pnode = uv_blade_to_pnode(uvhub); mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL); /* @@ -523,9 +906,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data) { } +static inline unsigned long long +millisec_2_cycles(unsigned long millisec) +{ + unsigned long ns; + unsigned long long cyc; + + ns = millisec * 1000; + cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); + return cyc; +} + /* - * Display the statistics thru /proc - * data points to the cpu number + * Display the statistics thru /proc. 
+ * 'data' points to the cpu number */ static int uv_ptc_seq_show(struct seq_file *file, void *data) { @@ -536,78 +930,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data) if (!cpu) { seq_printf(file, - "# cpu requestor requestee one all sretry dretry ptc_i "); + "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 "); seq_printf(file, - "sw_ack sflush dflush sok dnomsg dmult starget\n"); + "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto "); + seq_printf(file, + "retries rok resetp resett giveup sto bz throt "); + seq_printf(file, + "sw_ack recv rtime all "); + seq_printf(file, + "one mult none retry canc nocan reset rcan\n"); } if (cpu < num_possible_cpus() && cpu_online(cpu)) { stat = &per_cpu(ptcstats, cpu); - seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ", - cpu, stat->requestor, - stat->requestee, stat->onetlb, stat->alltlb, - stat->s_retry, stat->d_retry, stat->ptc_i); - seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", + /* source side statistics */ + seq_printf(file, + "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", + cpu, stat->s_requestor, cycles_2_us(stat->s_time), + stat->s_ntarguvhub, stat->s_ntarguvhub16, + stat->s_ntarguvhub8, stat->s_ntarguvhub4, + stat->s_ntarguvhub2, stat->s_ntarguvhub1, + stat->s_ntargcpu, stat->s_dtimeout); + seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ", + stat->s_retry_messages, stat->s_retriesok, + stat->s_resets_plug, stat->s_resets_timeout, + stat->s_giveup, stat->s_stimeout, + stat->s_busy, stat->s_throttles); + /* destination side statistics */ + seq_printf(file, + "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", uv_read_global_mmr64(uv_cpu_to_pnode(cpu), UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), - stat->sflush, stat->dflush, - stat->retriesok, stat->nomsg, - stat->multmsg, stat->ntargeted); + stat->d_requestee, cycles_2_us(stat->d_time), + stat->d_alltlb, stat->d_onetlb, stat->d_multmsg, + stat->d_nomsg, stat->d_retries, stat->d_canceled, + stat->d_nocanceled, stat->d_resets, + stat->d_rcanceled); } return 0; } /* + * -1: reset the statistics * 0: display meaning of the statistics - * >0: retry limit + * >0: maximum concurrent active descriptors per uvhub (throttle) */ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data) { - long newmode; + int cpu; + long input_arg; char optstr[64]; + struct ptc_stats *stat; + struct bau_control *bcp; if (count == 0 || count > sizeof(optstr)) return -EINVAL; if (copy_from_user(optstr, user, count)) return -EFAULT; optstr[count - 1] = '\0'; - if (strict_strtoul(optstr, 10, &newmode) < 0) { + if (strict_strtol(optstr, 10, &input_arg) < 0) { printk(KERN_DEBUG "%s is invalid\n", optstr); return -EINVAL; } - if (newmode == 0) { + if (input_arg == 0) { printk(KERN_DEBUG "# cpu: cpu number\n"); + printk(KERN_DEBUG "Sender statistics:\n"); + printk(KERN_DEBUG + "sent: number of shootdown messages sent\n"); + printk(KERN_DEBUG + "stime: time spent sending messages\n"); + printk(KERN_DEBUG + "numuvhubs: number of hubs targeted with shootdown\n"); + printk(KERN_DEBUG + "numuvhubs16: number times 16 or more hubs targeted\n"); + printk(KERN_DEBUG + "numuvhubs8: number times 8 or more hubs targeted\n"); + printk(KERN_DEBUG + "numuvhubs4: number times 4 or more hubs targeted\n"); + printk(KERN_DEBUG + "numuvhubs2: number times 2 or more hubs targeted\n"); + printk(KERN_DEBUG + "numuvhubs1: number times 1 hub targeted\n"); + printk(KERN_DEBUG + "numcpus: number of cpus targeted with shootdown\n"); + printk(KERN_DEBUG + "dto: number of 
destination timeouts\n"); + printk(KERN_DEBUG + "retries: destination timeout retries sent\n"); + printk(KERN_DEBUG + "rok: destination timeouts successfully retried\n"); + printk(KERN_DEBUG + "resetp: ipi-style resource resets for plugs\n"); + printk(KERN_DEBUG + "resett: ipi-style resource resets for timeouts\n"); + printk(KERN_DEBUG + "giveup: fall-backs to ipi-style shootdowns\n"); + printk(KERN_DEBUG + "sto: number of source timeouts\n"); + printk(KERN_DEBUG + "bz: number of stay-busy's\n"); + printk(KERN_DEBUG + "throt: number times spun in throttle\n"); + printk(KERN_DEBUG "Destination side statistics:\n"); printk(KERN_DEBUG - "requestor: times this cpu was the flush requestor\n"); + "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); printk(KERN_DEBUG - "requestee: times this cpu was requested to flush its TLBs\n"); + "recv: shootdown messages received\n"); printk(KERN_DEBUG - "one: times requested to flush a single address\n"); + "rtime: time spent processing messages\n"); printk(KERN_DEBUG - "all: times requested to flush all TLB's\n"); + "all: shootdown all-tlb messages\n"); printk(KERN_DEBUG - "sretry: number of retries of source-side timeouts\n"); + "one: shootdown one-tlb messages\n"); printk(KERN_DEBUG - "dretry: number of retries of destination-side timeouts\n"); + "mult: interrupts that found multiple messages\n"); printk(KERN_DEBUG - "ptc_i: times UV fell through to IPI-style flushes\n"); + "none: interrupts that found no messages\n"); printk(KERN_DEBUG - "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); + "retry: number of retry messages processed\n"); printk(KERN_DEBUG - "sflush_us: cycles spent in uv_flush_tlb_others()\n"); + "canc: number messages canceled by retries\n"); printk(KERN_DEBUG - "dflush_us: cycles spent in handling flush requests\n"); - printk(KERN_DEBUG "sok: successes on retry\n"); - printk(KERN_DEBUG "dnomsg: interrupts with no message\n"); + "nocan: number retries that found nothing to cancel\n"); printk(KERN_DEBUG - "dmult: interrupts with multiple messages\n"); - printk(KERN_DEBUG "starget: nodes targeted\n"); + "reset: number of ipi-style reset requests processed\n"); + printk(KERN_DEBUG + "rcan: number messages canceled by reset requests\n"); + } else if (input_arg == -1) { + for_each_present_cpu(cpu) { + stat = &per_cpu(ptcstats, cpu); + memset(stat, 0, sizeof(struct ptc_stats)); + } } else { - uv_bau_retry_limit = newmode; - printk(KERN_DEBUG "timeout retry limit:%d\n", - uv_bau_retry_limit); + uv_bau_max_concurrent = input_arg; + bcp = &per_cpu(bau_control, smp_processor_id()); + if (uv_bau_max_concurrent < 1 || + uv_bau_max_concurrent > bcp->cpus_in_uvhub) { + printk(KERN_DEBUG + "Error: BAU max concurrent %d; %d is invalid\n", + bcp->max_concurrent, uv_bau_max_concurrent); + return -EINVAL; + } + printk(KERN_DEBUG "Set BAU max concurrent:%d\n", + uv_bau_max_concurrent); + for_each_present_cpu(cpu) { + bcp = &per_cpu(bau_control, cpu); + bcp->max_concurrent = uv_bau_max_concurrent; + } } return count; @@ -650,80 +1121,31 @@ static int __init uv_ptc_init(void) return 0; } -/* - * begin the initialization of the per-blade control structures - */ -static struct bau_control * __init uv_table_bases_init(int blade, int node) -{ - int i; - struct bau_msg_status *msp; - struct bau_control *bau_tabp; - - bau_tabp = - kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node); - BUG_ON(!bau_tabp); - - bau_tabp->msg_statuses = - kmalloc_node(sizeof(struct bau_msg_status) * - DEST_Q_SIZE, GFP_KERNEL, node); - 
BUG_ON(!bau_tabp->msg_statuses); - - for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++) - bau_cpubits_clear(&msp->seen_by, (int) - uv_blade_nr_possible_cpus(blade)); - - uv_bau_table_bases[blade] = bau_tabp; - - return bau_tabp; -} - -/* - * finish the initialization of the per-blade control structures - */ -static void __init -uv_table_bases_finish(int blade, - struct bau_control *bau_tablesp, - struct bau_desc *adp) -{ - struct bau_control *bcp; - int cpu; - - for_each_present_cpu(cpu) { - if (blade != uv_cpu_to_blade_id(cpu)) - continue; - - bcp = (struct bau_control *)&per_cpu(bau_control, cpu); - bcp->bau_msg_head = bau_tablesp->va_queue_first; - bcp->va_queue_first = bau_tablesp->va_queue_first; - bcp->va_queue_last = bau_tablesp->va_queue_last; - bcp->msg_statuses = bau_tablesp->msg_statuses; - bcp->descriptor_base = adp; - } -} - /* * initialize the sending side's sending buffers */ -static struct bau_desc * __init +static void uv_activation_descriptor_init(int node, int pnode) { int i; + int cpu; unsigned long pa; unsigned long m; unsigned long n; - struct bau_desc *adp; - struct bau_desc *ad2; + struct bau_desc *bau_desc; + struct bau_desc *bd2; + struct bau_control *bcp; /* * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) - * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade + * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub */ - adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* + bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); - BUG_ON(!adp); + BUG_ON(!bau_desc); - pa = uv_gpa(adp); /* need the real nasid*/ - n = uv_gpa_to_pnode(pa); + pa = uv_gpa(bau_desc); /* need the real nasid*/ + n = pa >> uv_nshift; m = pa & uv_mmask; uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, @@ -732,96 +1154,188 @@ uv_activation_descriptor_init(int node, int pnode) /* * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each * cpu even though we only use the first one; one descriptor can - * describe a broadcast to 256 nodes. + * describe a broadcast to 256 uv hubs. */ - for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); - i++, ad2++) { - memset(ad2, 0, sizeof(struct bau_desc)); - ad2->header.sw_ack_flag = 1; + for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); + i++, bd2++) { + memset(bd2, 0, sizeof(struct bau_desc)); + bd2->header.sw_ack_flag = 1; /* - * base_dest_nodeid is the first node in the partition, so - * the bit map will indicate partition-relative node numbers. - * note that base_dest_nodeid is actually a nasid. + * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub + * in the partition. The bit map will indicate uvhub numbers, + * which are 0-N in a partition. Pnodes are unique system-wide. 
*/ - ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1; - ad2->header.dest_subnodeid = 0x10; /* the LB */ - ad2->header.command = UV_NET_ENDPOINT_INTD; - ad2->header.int_both = 1; + bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; + bd2->header.dest_subnodeid = 0x10; /* the LB */ + bd2->header.command = UV_NET_ENDPOINT_INTD; + bd2->header.int_both = 1; /* * all others need to be set to zero: * fairness chaining multilevel count replied_to */ } - return adp; + for_each_present_cpu(cpu) { + if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu))) + continue; + bcp = &per_cpu(bau_control, cpu); + bcp->descriptor_base = bau_desc; + } } /* * initialize the destination side's receiving buffers + * entered for each uvhub in the partition + * - node is first node (kernel memory notion) on the uvhub + * - pnode is the uvhub's physical identifier */ -static struct bau_payload_queue_entry * __init -uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) +static void +uv_payload_queue_init(int node, int pnode) { - struct bau_payload_queue_entry *pqp; - unsigned long pa; int pn; + int cpu; char *cp; + unsigned long pa; + struct bau_payload_queue_entry *pqp; + struct bau_payload_queue_entry *pqp_malloc; + struct bau_control *bcp; pqp = (struct bau_payload_queue_entry *) kmalloc_node( (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), GFP_KERNEL, node); BUG_ON(!pqp); + pqp_malloc = pqp; cp = (char *)pqp + 31; pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); - bau_tablesp->va_queue_first = pqp; + + for_each_present_cpu(cpu) { + if (pnode != uv_cpu_to_pnode(cpu)) + continue; + /* for every cpu on this pnode: */ + bcp = &per_cpu(bau_control, cpu); + bcp->va_queue_first = pqp; + bcp->bau_msg_head = pqp; + bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1); + } /* * need the pnode of where the memory was really allocated */ pa = uv_gpa(pqp); - pn = uv_gpa_to_pnode(pa); + pn = pa >> uv_nshift; uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | uv_physnodeaddr(pqp)); uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, uv_physnodeaddr(pqp)); - bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1); uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, (unsigned long) - uv_physnodeaddr(bau_tablesp->va_queue_last)); + uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1))); + /* in effect, all msg_type's are set to MSG_NOOP */ memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE); - - return pqp; } /* - * Initialization of each UV blade's structures + * Initialization of each UV hub's structures */ -static int __init uv_init_blade(int blade) +static void __init uv_init_uvhub(int uvhub, int vector) { int node; int pnode; - unsigned long pa; unsigned long apicid; - struct bau_desc *adp; - struct bau_payload_queue_entry *pqp; - struct bau_control *bau_tablesp; - - node = blade_to_first_node(blade); - bau_tablesp = uv_table_bases_init(blade, node); - pnode = uv_blade_to_pnode(blade); - adp = uv_activation_descriptor_init(node, pnode); - pqp = uv_payload_queue_init(node, pnode, bau_tablesp); - uv_table_bases_finish(blade, bau_tablesp, adp); + + node = uvhub_to_first_node(uvhub); + pnode = uv_blade_to_pnode(uvhub); + uv_activation_descriptor_init(node, pnode); + uv_payload_queue_init(node, pnode); /* * the below initialization can't be in firmware because the * messaging IRQ will be determined by the OS */ - apicid = blade_to_first_apicid(blade); - pa = 
uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); + apicid = uvhub_to_first_apicid(uvhub); uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, - ((apicid << 32) | UV_BAU_MESSAGE)); - return 0; + ((apicid << 32) | vector)); +} + +/* + * initialize the bau_control structure for each cpu + */ +static void uv_init_per_cpu(int nuvhubs) +{ + int i, j, k; + int cpu; + int pnode; + int uvhub; + short socket = 0; + struct bau_control *bcp; + struct uvhub_desc *bdp; + struct socket_desc *sdp; + struct bau_control *hmaster = NULL; + struct bau_control *smaster = NULL; + struct socket_desc { + short num_cpus; + short cpu_number[16]; + }; + struct uvhub_desc { + short num_sockets; + short num_cpus; + short uvhub; + short pnode; + struct socket_desc socket[2]; + }; + struct uvhub_desc *uvhub_descs; + + uvhub_descs = (struct uvhub_desc *) + kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); + memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); + for_each_present_cpu(cpu) { + bcp = &per_cpu(bau_control, cpu); + memset(bcp, 0, sizeof(struct bau_control)); + spin_lock_init(&bcp->masks_lock); + bcp->max_concurrent = uv_bau_max_concurrent; + pnode = uv_cpu_hub_info(cpu)->pnode; + uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; + bdp = &uvhub_descs[uvhub]; + bdp->num_cpus++; + bdp->uvhub = uvhub; + bdp->pnode = pnode; + /* time interval to catch a hardware stay-busy bug */ + bcp->timeout_interval = millisec_2_cycles(3); + /* kludge: assume uv_hub.h is constant */ + socket = (cpu_physical_id(cpu)>>5)&1; + if (socket >= bdp->num_sockets) + bdp->num_sockets = socket+1; + sdp = &bdp->socket[socket]; + sdp->cpu_number[sdp->num_cpus] = cpu; + sdp->num_cpus++; + } + socket = 0; + for_each_possible_blade(uvhub) { + bdp = &uvhub_descs[uvhub]; + for (i = 0; i < bdp->num_sockets; i++) { + sdp = &bdp->socket[i]; + for (j = 0; j < sdp->num_cpus; j++) { + cpu = sdp->cpu_number[j]; + bcp = &per_cpu(bau_control, cpu); + bcp->cpu = cpu; + if (j == 0) { + smaster = bcp; + if (i == 0) + hmaster = bcp; + } + bcp->cpus_in_uvhub = bdp->num_cpus; + bcp->cpus_in_socket = sdp->num_cpus; + bcp->socket_master = smaster; + bcp->uvhub_master = hmaster; + for (k = 0; k < DEST_Q_SIZE; k++) + bcp->socket_acknowledge_count[k] = 0; + bcp->uvhub_cpu = + uv_cpu_hub_info(cpu)->blade_processor_id; + } + socket++; + } + } + kfree(uvhub_descs); } /* @@ -829,38 +1343,54 @@ static int __init uv_init_blade(int blade) */ static int __init uv_bau_init(void) { - int blade; - int nblades; + int uvhub; + int pnode; + int nuvhubs; int cur_cpu; + int vector; + unsigned long mmr; if (!is_uv_system()) return 0; + if (nobau) + return 0; + for_each_possible_cpu(cur_cpu) zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), GFP_KERNEL, cpu_to_node(cur_cpu)); - uv_bau_retry_limit = 1; + uv_bau_max_concurrent = MAX_BAU_CONCURRENT; + uv_nshift = uv_hub_info->m_val; uv_mmask = (1UL << uv_hub_info->m_val) - 1; - nblades = uv_num_possible_blades(); + nuvhubs = uv_num_possible_blades(); - uv_bau_table_bases = (struct bau_control **) - kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); - BUG_ON(!uv_bau_table_bases); + uv_init_per_cpu(nuvhubs); uv_partition_base_pnode = 0x7fffffff; - for (blade = 0; blade < nblades; blade++) - if (uv_blade_nr_possible_cpus(blade) && - (uv_blade_to_pnode(blade) < uv_partition_base_pnode)) - uv_partition_base_pnode = uv_blade_to_pnode(blade); - for (blade = 0; blade < nblades; blade++) - if (uv_blade_nr_possible_cpus(blade)) - uv_init_blade(blade); - - alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); + for 
(uvhub = 0; uvhub < nuvhubs; uvhub++) + if (uv_blade_nr_possible_cpus(uvhub) && + (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) + uv_partition_base_pnode = uv_blade_to_pnode(uvhub); + + vector = UV_BAU_MESSAGE; + for_each_possible_blade(uvhub) + if (uv_blade_nr_possible_cpus(uvhub)) + uv_init_uvhub(uvhub, vector); + uv_enable_timeouts(); + alloc_intr_gate(vector, uv_bau_message_intr1); + + for_each_possible_blade(uvhub) { + pnode = uv_blade_to_pnode(uvhub); + /* INIT the bau */ + uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, + ((unsigned long)1 << 63)); + mmr = 1; /* should be 1 to broadcast to both sockets */ + uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr); + } return 0; } -__initcall(uv_bau_init); -__initcall(uv_ptc_init); +core_initcall(uv_bau_init); +core_initcall(uv_ptc_init); -- cgit v1.2.3-59-g8ed1b From a289cc7c70da784a2d370b91885cab4f966dcb0f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 16 Apr 2010 17:51:42 -0700 Subject: x86, UV: uv_irq.c: Fix all sparse warnings Fix all sparse warnings in building uv_irq.c. arch/x86/kernel/uv_irq.c:46:17: warning: symbol 'uv_irq_chip' was not declared. Should it be static? arch/x86/kernel/uv_irq.c:143:50: error: no identifier for function argument arch/x86/kernel/uv_irq.c:162:13: error: typename in expression arch/x86/kernel/uv_irq.c:162:13: error: undefined identifier 'restrict' arch/x86/kernel/uv_irq.c:250:44: error: no identifier for function argument arch/x86/kernel/uv_irq.c:260:17: error: typename in expression arch/x86/kernel/uv_irq.c:260:17: error: undefined identifier 'restrict' arch/x86/kernel/uv_irq.c:233:50: warning: incorrect type in argument 3 (different signedness) arch/x86/kernel/uv_irq.c:233:50: expected int *pnode arch/x86/kernel/uv_irq.c:233:50: got unsigned int * arch/x86/include/asm/uv/uv_hub.h:318:44: warning: incorrect type in argument 2 (different address spaces) arch/x86/include/asm/uv/uv_hub.h:318:44: expected void volatile [noderef] *addr arch/x86/include/asm/uv/uv_hub.h:318:44: got unsigned long * Signed-off-by: Randy Dunlap Cc: Dimitri Sivanich Cc: Russ Anderson Cc: Robin Holt Cc: Mike Travis Cc: Cliff Wickman Cc: Jack Steiner LKML-Reference: <20100416175142.f4b59683.randy.dunlap@oracle.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uv/uv_hub.h | 2 +- arch/x86/kernel/uv_irq.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 14cc74ba5d23..bf6b88ef8eeb 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -307,7 +307,7 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset * Access Global MMR space using the MMR space located at the top of physical * memory. 
*/ -static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset) +static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset) { return __va(UV_GLOBAL_MMR64_BASE | UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index ece73d8e3240..1a9f55a33489 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c @@ -43,7 +43,7 @@ static void uv_ack_apic(unsigned int irq) ack_APIC_irq(); } -struct irq_chip uv_irq_chip = { +static struct irq_chip uv_irq_chip = { .name = "UV-CORE", .startup = uv_noop_ret, .shutdown = uv_noop, @@ -140,7 +140,7 @@ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) */ static int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, - unsigned long mmr_offset, int restrict) + unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); struct irq_desc *desc = irq_to_desc(irq); @@ -159,7 +159,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, if (err != 0) return err; - if (restrict == UV_AFFINITY_CPU) + if (limit == UV_AFFINITY_CPU) desc->status |= IRQ_NO_BALANCING; else desc->status |= IRQ_MOVE_PCNTXT; @@ -213,7 +213,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; unsigned long mmr_offset; - unsigned mmr_pnode; + int mmr_pnode; if (set_desc_affinity(desc, mask, &dest)) return -1; @@ -247,7 +247,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) * interrupt is raised. */ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, - unsigned long mmr_offset, int restrict) + unsigned long mmr_offset, int limit) { int irq, ret; @@ -257,7 +257,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, return -EBUSY; ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, - restrict); + limit); if (ret == irq) uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); else -- cgit v1.2.3-59-g8ed1b From 1f9cc3cb6a27521edfe0a21abf97d2bb11c4d237 Mon Sep 17 00:00:00 2001 From: Robin Holt Date: Fri, 23 Apr 2010 10:36:22 -0500 Subject: x86, pat: Update the page flags for memtype atomically instead of using memtype_lock While testing an application using the xpmem (out of kernel) driver, we noticed a significant page fault rate reduction of x86_64 with respect to ia64. For one test running with 32 cpus, one thread per cpu, it took 01:08 for each of the threads to vm_insert_pfn 2GB worth of pages. For the same test running on 256 cpus, one thread per cpu, it took 14:48 to vm_insert_pfn 2 GB worth of pages. The slowdown was tracked to lookup_memtype which acquires the spinlock memtype_lock. This heavily contended lock was slowing down vm_insert_pfn(). With the cmpxchg on page->flags method, both the 32 cpu and 256 cpu cases take approx 00:01.3 seconds to complete. Signed-off-by: Robin Holt LKML-Reference: <20100423153627.751194346@gulag1.americas.sgi.com> Cc: Venkatesh Pallipadi Cc: Rafael Wysocki Reviewed-by: Suresh Siddha Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/cacheflush.h | 44 ++++++++++++++++++++++----------------- arch/x86/mm/pat.c | 8 ------- 2 files changed, 25 insertions(+), 27 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 634c40a739a6..c70068d05f70 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma, memcpy(dst, src, len); } -#define PG_WC PG_arch_1 -PAGEFLAG(WC, WC) - #ifdef CONFIG_X86_PAT /* * X86 PAT uses page flags WC and Uncached together to keep track of @@ -55,16 +52,24 @@ PAGEFLAG(WC, WC) * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not * been changed from its default (value of -1 used to denote this). * Note we do not support _PAGE_CACHE_UC here. - * - * Caller must hold memtype_lock for atomicity. */ + +#define _PGMT_DEFAULT 0 +#define _PGMT_WC (1UL << PG_arch_1) +#define _PGMT_UC_MINUS (1UL << PG_uncached) +#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1) +#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) +#define _PGMT_CLEAR_MASK (~_PGMT_MASK) + static inline unsigned long get_page_memtype(struct page *pg) { - if (!PageUncached(pg) && !PageWC(pg)) + unsigned long pg_flags = pg->flags & _PGMT_MASK; + + if (pg_flags == _PGMT_DEFAULT) return -1; - else if (!PageUncached(pg) && PageWC(pg)) + else if (pg_flags == _PGMT_WC) return _PAGE_CACHE_WC; - else if (PageUncached(pg) && !PageWC(pg)) + else if (pg_flags == _PGMT_UC_MINUS) return _PAGE_CACHE_UC_MINUS; else return _PAGE_CACHE_WB; @@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg) static inline void set_page_memtype(struct page *pg, unsigned long memtype) { + unsigned long memtype_flags = _PGMT_DEFAULT; + unsigned long old_flags; + unsigned long new_flags; + switch (memtype) { case _PAGE_CACHE_WC: - ClearPageUncached(pg); - SetPageWC(pg); + memtype_flags = _PGMT_WC; break; case _PAGE_CACHE_UC_MINUS: - SetPageUncached(pg); - ClearPageWC(pg); + memtype_flags = _PGMT_UC_MINUS; break; case _PAGE_CACHE_WB: - SetPageUncached(pg); - SetPageWC(pg); - break; - default: - case -1: - ClearPageUncached(pg); - ClearPageWC(pg); + memtype_flags = _PGMT_WB; break; } + + do { + old_flags = pg->flags; + new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; + } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags); } #else static inline unsigned long get_page_memtype(struct page *pg) { return -1; } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 951011166ef5..501fc60e5e4d 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -190,8 +190,6 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end) * Here we do two pass: * - Find the memtype of all the pages in the range, look for any conflicts * - In case of no conflicts, set the new memtype for pages in the range - * - * Caller must hold memtype_lock for atomicity. 
*/ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, unsigned long *new_type) @@ -297,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { - spin_lock(&memtype_lock); err = reserve_ram_pages_type(start, end, req_type, new_type); - spin_unlock(&memtype_lock); return err; } else if (is_range_ram < 0) { @@ -351,9 +347,7 @@ int free_memtype(u64 start, u64 end) is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { - spin_lock(&memtype_lock); err = free_ram_pages_type(start, end); - spin_unlock(&memtype_lock); return err; } else if (is_range_ram < 0) { @@ -394,10 +388,8 @@ static unsigned long lookup_memtype(u64 paddr) if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { struct page *page; - spin_lock(&memtype_lock); page = pfn_to_page(paddr >> PAGE_SHIFT); rettype = get_page_memtype(page); - spin_unlock(&memtype_lock); /* * -1 from get_page_memtype() implies RAM page is in its * default state and not reserved, and hence of type WB -- cgit v1.2.3-59-g8ed1b From 402af0d7c692ddcfa2333e93d3f275ebd0487926 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 21 Apr 2010 15:21:51 +0100 Subject: x86, asm: Introduce and use percpu_inc() ... generating slightly smaller code. Signed-off-by: Jan Beulich LKML-Reference: <4BCF261F020000780003B33C@vpn.id2.novell.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/hardirq.h | 2 +- arch/x86/include/asm/percpu.h | 24 ++++++++++++++++++++++++ arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- 3 files changed, 27 insertions(+), 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 0f8576427cfe..aeab29aee617 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT -#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) +#define inc_irq_stat(member) percpu_inc(irq_stat.member) #define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 66a272dfd8b8..0ec6d12d84e6 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -190,6 +190,29 @@ do { \ pfo_ret__; \ }) +#define percpu_unary_op(op, var) \ +({ \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b "__percpu_arg(0) \ + : "+m" (var)); \ + break; \ + case 2: \ + asm(op "w "__percpu_arg(0) \ + : "+m" (var)); \ + break; \ + case 4: \ + asm(op "l "__percpu_arg(0) \ + : "+m" (var)); \ + break; \ + case 8: \ + asm(op "q "__percpu_arg(0) \ + : "+m" (var)); \ + break; \ + default: __bad_percpu_size(); \ + } \ +}) + /* * percpu_read() makes gcc load the percpu variable every time it is * accessed while percpu_read_stable() allows the value to be cached. 
@@ -207,6 +230,7 @@ do { \ #define percpu_and(var, val) percpu_to_op("and", var, val) #define percpu_or(var, val) percpu_to_op("or", var, val) #define percpu_xor(var, val) percpu_to_op("xor", var, val) +#define percpu_inc(var) percpu_unary_op("inc", var) #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8a6f0afa767e..7a355ddcc64b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -539,7 +539,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) struct mce m; int i; - __get_cpu_var(mce_poll_count)++; + percpu_inc(mce_poll_count); mce_setup(&m); @@ -934,7 +934,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) atomic_inc(&mce_entry); - __get_cpu_var(mce_exception_count)++; + percpu_inc(mce_exception_count); if (notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL) == NOTIFY_STOP) -- cgit v1.2.3-59-g8ed1b From 5967ed87ade85a421ef814296c3c7f182b08c225 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 21 Apr 2010 16:08:14 +0100 Subject: x86-64: Reduce SMP locks table size Reduce the SMP locks table size by using relative pointers instead of absolute ones, thus cutting the table size by half. Signed-off-by: Jan Beulich LKML-Reference: <4BCF30FE020000780003B3B6@vpn.id2.novell.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/alternative-asm.h | 4 +-- arch/x86/include/asm/alternative.h | 4 +-- arch/x86/kernel/alternative.c | 45 +++++++++++++++++++--------------- 3 files changed, 29 insertions(+), 24 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index b97f786a48d5..a63a68be1cce 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -6,8 +6,8 @@ .macro LOCK_PREFIX 1: lock .section .smp_locks,"a" - _ASM_ALIGN - _ASM_PTR 1b + .balign 4 + .long 1b - . .previous .endm #else diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index b09ec55650b3..714bf2417284 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -30,8 +30,8 @@ #ifdef CONFIG_SMP #define LOCK_PREFIX \ ".section .smp_locks,\"a\"\n" \ - _ASM_ALIGN "\n" \ - _ASM_PTR "661f\n" /* address */ \ + ".balign 4\n" \ + ".long 661f - .\n" /* offset */ \ ".previous\n" \ "661:\n\tlock; " diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 1a160d5d44d0..936738427223 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) } extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; -extern u8 *__smp_locks[], *__smp_locks_end[]; +extern s32 __smp_locks[], __smp_locks_end[]; static void *text_poke_early(void *addr, const void *opcode, size_t len); /* Replace instructions with better alternatives for this CPU type. 
@@ -235,37 +235,39 @@ void __init_or_module apply_alternatives(struct alt_instr *start, #ifdef CONFIG_SMP -static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) +static void alternatives_smp_lock(const s32 *start, const s32 *end, + u8 *text, u8 *text_end) { - u8 **ptr; + const s32 *poff; mutex_lock(&text_mutex); - for (ptr = start; ptr < end; ptr++) { - if (*ptr < text) - continue; - if (*ptr > text_end) + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + + if (!*poff || ptr < text || ptr >= text_end) continue; /* turn DS segment override prefix into lock prefix */ - text_poke(*ptr, ((unsigned char []){0xf0}), 1); + text_poke(ptr, ((unsigned char []){0xf0}), 1); }; mutex_unlock(&text_mutex); } -static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) +static void alternatives_smp_unlock(const s32 *start, const s32 *end, + u8 *text, u8 *text_end) { - u8 **ptr; + const s32 *poff; if (noreplace_smp) return; mutex_lock(&text_mutex); - for (ptr = start; ptr < end; ptr++) { - if (*ptr < text) - continue; - if (*ptr > text_end) + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + + if (!*poff || ptr < text || ptr >= text_end) continue; /* turn lock prefix into DS segment override prefix */ - text_poke(*ptr, ((unsigned char []){0x3E}), 1); + text_poke(ptr, ((unsigned char []){0x3E}), 1); }; mutex_unlock(&text_mutex); } @@ -276,8 +278,8 @@ struct smp_alt_module { char *name; /* ptrs to lock prefixes */ - u8 **locks; - u8 **locks_end; + const s32 *locks; + const s32 *locks_end; /* .text segment, needed to avoid patching init code ;) */ u8 *text; @@ -398,16 +400,19 @@ void alternatives_smp_switch(int smp) int alternatives_text_reserved(void *start, void *end) { struct smp_alt_module *mod; - u8 **ptr; + const s32 *poff; u8 *text_start = start; u8 *text_end = end; list_for_each_entry(mod, &smp_alt_modules, next) { if (mod->text > text_end || mod->text_end < text_start) continue; - for (ptr = mod->locks; ptr < mod->locks_end; ptr++) - if (text_start <= *ptr && text_end >= *ptr) + for (poff = mod->locks; poff < mod->locks_end; poff++) { + const u8 *ptr = (const u8 *)poff + *poff; + + if (text_start <= ptr && text_end > ptr) return 1; + } } return 0; -- cgit v1.2.3-59-g8ed1b From 6fc108a08dcddf8f9113cc7102ddaacf7ed37a6b Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 21 Apr 2010 15:23:44 +0100 Subject: x86: Clean up arch/x86/Kconfig* No functional change intended. Signed-off-by: Jan Beulich LKML-Reference: <4BCF2690020000780003B340@vpn.id2.novell.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/Kconfig | 49 ++++++++++++++------------------------------- arch/x86/Kconfig.debug | 2 -- arch/x86/include/asm/boot.h | 2 +- 3 files changed, 16 insertions(+), 37 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9458685902bd..85e2252625bc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -197,20 +197,17 @@ config HAVE_INTEL_TXT # Use the generic interrupt handling code in kernel/irq/: config GENERIC_HARDIRQS - bool - default y + def_bool y config GENERIC_HARDIRQS_NO__DO_IRQ def_bool y config GENERIC_IRQ_PROBE - bool - default y + def_bool y config GENERIC_PENDING_IRQ - bool + def_bool y depends on GENERIC_HARDIRQS && SMP - default y config USE_GENERIC_SMP_HELPERS def_bool y @@ -225,14 +222,12 @@ config X86_64_SMP depends on X86_64 && SMP config X86_HT - bool + def_bool y depends on SMP - default y config X86_TRAMPOLINE - bool + def_bool y depends on SMP || (64BIT && ACPI_SLEEP) - default y config X86_32_LAZY_GS def_bool y @@ -447,7 +442,7 @@ config X86_NUMAQ firmware with - send email to . config X86_SUPPORTS_MEMORY_FAILURE - bool + def_bool y # MCE code calls memory_failure(): depends on X86_MCE # On 32-bit this adds too big of NODES_SHIFT and we run out of page flags: @@ -455,7 +450,6 @@ config X86_SUPPORTS_MEMORY_FAILURE # On 32-bit SPARSEMEM adds too big of SECTIONS_WIDTH: depends on X86_64 || !SPARSEMEM select ARCH_SUPPORTS_MEMORY_FAILURE - default y config X86_VISWS bool "SGI 320/540 (Visual Workstation)" @@ -570,7 +564,6 @@ config PARAVIRT_SPINLOCKS config PARAVIRT_CLOCK bool - default n endif @@ -749,7 +742,6 @@ config MAXSMP bool "Configure Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL select CPUMASK_OFFSTACK - default n ---help--- Configure maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. @@ -829,7 +821,6 @@ config X86_VISWS_APIC config X86_REROUTE_FOR_BROKEN_BOOT_IRQS bool "Reroute for broken boot IRQs" - default n depends on X86_IO_APIC ---help--- This option enables a workaround that fixes a source of @@ -876,9 +867,8 @@ config X86_MCE_AMD the DRAM Error Threshold. config X86_ANCIENT_MCE - def_bool n + bool "Support for old Pentium 5 / WinChip machine checks" depends on X86_32 && X86_MCE - prompt "Support for old Pentium 5 / WinChip machine checks" ---help--- Include support for machine check handling on old Pentium 5 or WinChip systems. These typically need to be enabled explicitely on the command @@ -886,8 +876,7 @@ config X86_ANCIENT_MCE config X86_MCE_THRESHOLD depends on X86_MCE_AMD || X86_MCE_INTEL - bool - default y + def_bool y config X86_MCE_INJECT depends on X86_MCE @@ -1026,8 +1015,8 @@ config X86_CPUID choice prompt "High Memory Support" - default HIGHMEM4G if !X86_NUMAQ default HIGHMEM64G if X86_NUMAQ + default HIGHMEM4G depends on X86_32 config NOHIGHMEM @@ -1285,7 +1274,7 @@ source "mm/Kconfig" config HIGHPTE bool "Allocate 3rd-level pagetables from highmem" - depends on X86_32 && (HIGHMEM4G || HIGHMEM64G) + depends on HIGHMEM ---help--- The VM uses one page table entry for each page of physical memory. For systems with a lot of RAM, this can be wasteful of precious @@ -1369,8 +1358,7 @@ config MATH_EMULATION kernel, it won't hurt. 
config MTRR - bool - default y + def_bool y prompt "MTRR (Memory Type Range Register) support" if EMBEDDED ---help--- On Intel P6 family processors (Pentium Pro, Pentium II and later) @@ -1436,8 +1424,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT mtrr_spare_reg_nr=N on the kernel command line. config X86_PAT - bool - default y + def_bool y prompt "x86 PAT support" if EMBEDDED depends on MTRR ---help--- @@ -1605,8 +1592,7 @@ config X86_NEED_RELOCS depends on X86_32 && RELOCATABLE config PHYSICAL_ALIGN - hex - prompt "Alignment value to which kernel should be aligned" if X86_32 + hex "Alignment value to which kernel should be aligned" if X86_32 default "0x1000000" range 0x2000 0x1000000 ---help--- @@ -1653,7 +1639,6 @@ config COMPAT_VDSO config CMDLINE_BOOL bool "Built-in kernel command line" - default n ---help--- Allow for specifying boot arguments to the kernel at build time. On some systems (e.g. embedded ones), it is @@ -1687,7 +1672,6 @@ config CMDLINE config CMDLINE_OVERRIDE bool "Built-in command line overrides boot loader arguments" - default n depends on CMDLINE_BOOL ---help--- Set this option to 'Y' to have the kernel ignore the boot loader @@ -1723,8 +1707,7 @@ source "drivers/acpi/Kconfig" source "drivers/sfi/Kconfig" config X86_APM_BOOT - bool - default y + def_bool y depends on APM || APM_MODULE menuconfig APM @@ -1953,8 +1936,7 @@ config DMAR_DEFAULT_ON experimental. config DMAR_BROKEN_GFX_WA - def_bool n - prompt "Workaround broken graphics drivers (going away soon)" + bool "Workaround broken graphics drivers (going away soon)" depends on DMAR && BROKEN ---help--- Current Graphics drivers tend to use physical address @@ -2052,7 +2034,6 @@ config SCx200HR_TIMER config OLPC bool "One Laptop Per Child support" select GPIOLIB - default n ---help--- Add support for detecting the unique features of the OLPC XO hardware. diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index bc01e3ebfeb2..e7edd66d866e 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -45,7 +45,6 @@ config EARLY_PRINTK config EARLY_PRINTK_DBGP bool "Early printk via EHCI debug port" - default n depends on EARLY_PRINTK && PCI ---help--- Write kernel log output directly into the EHCI debug port. @@ -76,7 +75,6 @@ config DEBUG_PER_CPU_MAPS bool "Debug access to per_cpu maps" depends on DEBUG_KERNEL depends on SMP - default n ---help--- Say Y to verify that the per_cpu map being accessed has been setup. Adds a fair amount of code to kernel memory diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 7a1065958ba9..3b62ab56c7a0 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -24,7 +24,7 @@ #define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) #if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \ - (CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)) + (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN) #error "Invalid value for CONFIG_PHYSICAL_ALIGN" #endif -- cgit v1.2.3-59-g8ed1b From b701a47ba48b698976fb2fe05fb285b0edc1d26a Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 29 Apr 2010 16:03:57 -0700 Subject: x86: Fix LOCK_PREFIX_HERE for uniprocessor build Checkin b3ac891b67bd4b1fc728d1c784cad1212dea433d: x86: Add support for lock prefix in alternatives ... did not define LOCK_PREFIX_HERE in the case of a uniprocessor build. As a result, it would cause any of the usages of this macro to fail on a uniprocessor build. Fix this by defining LOCK_PREFIX_HERE as a null string. Signed-off-by: H. 
Peter Anvin Cc: Luca Barbieri LKML-Reference: <1267005265-27958-2-git-send-email-luca@luca-barbieri.com> --- arch/x86/include/asm/alternative.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 55fee12cea6d..e29a6c9bba00 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -38,6 +38,7 @@ #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " #else /* ! CONFIG_SMP */ +#define LOCK_PREFIX_HERE "" #define LOCK_PREFIX "" #endif -- cgit v1.2.3-59-g8ed1b From 9b6dba9e0798325dab427b9d60c61630ccc39b28 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Sun, 21 Mar 2010 09:00:44 -0400 Subject: x86: Merge simd_math_error() into math_error() The only difference between FPU and SIMD exceptions is where the status bits are read from (cwd/swd vs. mxcsr). This also fixes the discrepency introduced by commit adf77bac, which fixed FPU but not SIMD. Signed-off-by: Brian Gerst LKML-Reference: <1269176446-2489-3-git-send-email-brgerst@gmail.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/traps.h | 2 +- arch/x86/kernel/irqinit.c | 2 +- arch/x86/kernel/traps.c | 100 ++++++++++++++----------------------------- 3 files changed, 34 insertions(+), 70 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 4da91ad69e0d..f66cda56781d 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition) extern int panic_on_unrecovered_nmi; -void math_error(void __user *); +void math_error(struct pt_regs *, int, int); void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 asmlinkage void smp_thermal_interrupt(void); diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 0ed2d300cd46..990ae7cfc578 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) outb(0, 0xF0); if (ignore_fpu_irq || !boot_cpu_data.hard_math) return IRQ_NONE; - math_error((void __user *)get_irq_regs()->ip); + math_error(get_irq_regs(), 0, 16); return IRQ_HANDLED; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a16c9dfe6b70..a472992cecdc 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -595,36 +595,48 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) * the correct behaviour even in the presence of the asynchronous * IRQ13 behaviour */ -void math_error(void __user *ip) +void math_error(struct pt_regs *regs, int error_code, int trapnr) { struct task_struct *task; siginfo_t info; - unsigned short cwd, swd, err; + unsigned short err; /* * Save the info for the exception handler and clear the error. */ task = current; save_init_fpu(task); - task->thread.trap_no = 16; - task->thread.error_code = 0; + task->thread.trap_no = trapnr; + task->thread.error_code = error_code; info.si_signo = SIGFPE; info.si_errno = 0; - info.si_addr = ip; - /* - * (~cwd & swd) will mask out exceptions that are not set to unmasked - * status. 0x3f is the exception bits in these regs, 0x200 is the - * C1 reg you need in case of a stack fault, 0x040 is the stack - * fault bit. 
We should only be taking one exception at a time, - * so if this combination doesn't produce any single exception, - * then we have a bad program that isn't synchronizing its FPU usage - * and it will suffer the consequences since we won't be able to - * fully reproduce the context of the exception - */ - cwd = get_fpu_cwd(task); - swd = get_fpu_swd(task); + info.si_addr = (void __user *)regs->ip; + if (trapnr == 16) { + unsigned short cwd, swd; + /* + * (~cwd & swd) will mask out exceptions that are not set to unmasked + * status. 0x3f is the exception bits in these regs, 0x200 is the + * C1 reg you need in case of a stack fault, 0x040 is the stack + * fault bit. We should only be taking one exception at a time, + * so if this combination doesn't produce any single exception, + * then we have a bad program that isn't synchronizing its FPU usage + * and it will suffer the consequences since we won't be able to + * fully reproduce the context of the exception + */ + cwd = get_fpu_cwd(task); + swd = get_fpu_swd(task); - err = swd & ~cwd; + err = swd & ~cwd; + } else { + /* + * The SIMD FPU exceptions are handled a little differently, as there + * is only a single status/control register. Thus, to determine which + * unmasked exception was caught we must mask the exception mask bits + * at 0x1f80, and then use these to mask the exception bits at 0x3f. + */ + unsigned short mxcsr = get_fpu_mxcsr(task); + err = ~(mxcsr >> 7) & mxcsr; + } if (err & 0x001) { /* Invalid op */ /* @@ -663,55 +675,7 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) return; #endif - math_error((void __user *)regs->ip); -} - -static void simd_math_error(void __user *ip) -{ - struct task_struct *task; - siginfo_t info; - unsigned short mxcsr; - - /* - * Save the info for the exception handler and clear the error. - */ - task = current; - save_init_fpu(task); - task->thread.trap_no = 19; - task->thread.error_code = 0; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = __SI_FAULT; - info.si_addr = ip; - /* - * The SIMD FPU exceptions are handled a little differently, as there - * is only a single status/control register. Thus, to determine which - * unmasked exception was caught we must mask the exception mask bits - * at 0x1f80, and then use these to mask the exception bits at 0x3f. - */ - mxcsr = get_fpu_mxcsr(task); - switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { - case 0x000: - default: - break; - case 0x001: /* Invalid Op */ - info.si_code = FPE_FLTINV; - break; - case 0x002: /* Denormalize */ - case 0x010: /* Underflow */ - info.si_code = FPE_FLTUND; - break; - case 0x004: /* Zero Divide */ - info.si_code = FPE_FLTDIV; - break; - case 0x008: /* Overflow */ - info.si_code = FPE_FLTOVF; - break; - case 0x020: /* Precision */ - info.si_code = FPE_FLTRES; - break; - } - force_sig_info(SIGFPE, &info, task); + math_error(regs, error_code, 16); } dotraplinkage void @@ -727,7 +691,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code) return; #endif - simd_math_error((void __user *)regs->ip); + math_error(regs, error_code, 19); } dotraplinkage void -- cgit v1.2.3-59-g8ed1b From d88d95eb1c2a72b6126a550debe0883ff723a948 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 24 Apr 2010 09:56:53 +0200 Subject: x86, k8: Fix build error when K8_NB is disabled K8_NB depends on PCI and when the last is disabled (allnoconfig) we fail at the final linking stage due to missing exported num_k8_northbridges. Add a header stub for that. 
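As a rough illustration of the header-stub pattern (the names below are invented for the example, not taken from the patch), a header can give callers trivial stand-ins when a config option is off, so no call site needs its own #ifdef:

struct foo_dev;

#ifdef CONFIG_FOO
extern int foo_device_count;
extern struct foo_dev *foo_get_device(int node);
#else
/* CONFIG_FOO=n: constant and inline stub, nothing to export or link against */
#define foo_device_count 0
static inline struct foo_dev *foo_get_device(int node) { return NULL; }
#endif

The k8.h change below does the same thing for num_k8_northbridges, so code that only checks it at run time still links when CONFIG_K8_NB is disabled.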
Signed-off-by: Borislav Petkov LKML-Reference: <20100503183036.GJ26107@aftab> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/k8.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h index f70e60071fe8..af00bd1d2089 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/k8.h @@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); extern int k8_scan_nodes(void); #ifdef CONFIG_K8_NB +extern int num_k8_northbridges; + static inline struct pci_dev *node_to_k8_nb_misc(int node) { return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; } + #else +#define num_k8_northbridges 0 + static inline struct pci_dev *node_to_k8_nb_misc(int node) { return NULL; -- cgit v1.2.3-59-g8ed1b From 9638fa521e42c9281c874c6b5a382b1ced4ee496 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:07 -0700 Subject: x86, ioapic: Only export mp_find_ioapic and mp_find_ioapic_pin in io_apic.h Multiple declarations of the same function in different headers is a pain to maintain. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-6-git-send-email-ebiederm@xmission.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/mpspec.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index d8bf23a88d05..29994f06c7e2 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -106,10 +106,6 @@ struct device; extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level, int active_high_low); extern int acpi_probe_gsi(void); -#ifdef CONFIG_X86_IO_APIC -extern int mp_find_ioapic(int gsi); -extern int mp_find_ioapic_pin(int ioapic, int gsi); -#endif #else /* !CONFIG_ACPI: */ static inline int acpi_probe_gsi(void) { -- cgit v1.2.3-59-g8ed1b From eddb0c55a14074d6bac8c2ef169aefd7e2c6f139 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:09 -0700 Subject: x86, ioapic: Fix the types of gsi values This patches fixes the types of gsi_base and gsi_end values in struct mp_ioapic_gsi, and the gsi parameter of mp_find_ioapic and mp_find_ioapic_pin A gsi is cannonically a u32, not an int. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-8-git-send-email-ebiederm@xmission.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/io_apic.h | 10 +++++----- arch/x86/kernel/apic/io_apic.c | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 35832a03a515..feeaf0d92460 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -180,12 +180,12 @@ extern void ioapic_write_entry(int apic, int pin, extern void setup_ioapic_ids_from_mpc(void); struct mp_ioapic_gsi{ - int gsi_base; - int gsi_end; + u32 gsi_base; + u32 gsi_end; }; extern struct mp_ioapic_gsi mp_gsi_routing[]; -int mp_find_ioapic(int gsi); -int mp_find_ioapic_pin(int ioapic, int gsi); +int mp_find_ioapic(u32 gsi); +int mp_find_ioapic_pin(int ioapic, u32 gsi); void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void __init pre_init_apic_IRQ0(void); @@ -197,7 +197,7 @@ static const int timer_through_8259 = 0; static inline void ioapic_init_mappings(void) { } static inline void ioapic_insert_resources(void) { } static inline void probe_nr_irqs_gsi(void) { } -static inline int mp_find_ioapic(int gsi) { return 0; } +static inline int mp_find_ioapic(u32 gsi) { return 0; } struct io_apic_irq_attr; static inline int io_apic_set_pci_routing(struct device *dev, int irq, diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 0a053e61b3ea..9ab97622b8e6 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -4247,7 +4247,7 @@ void __init ioapic_insert_resources(void) } } -int mp_find_ioapic(int gsi) +int mp_find_ioapic(u32 gsi) { int i = 0; @@ -4262,7 +4262,7 @@ int mp_find_ioapic(int gsi) return -1; } -int mp_find_ioapic_pin(int ioapic, int gsi) +int mp_find_ioapic_pin(int ioapic, u32 gsi) { if (WARN_ON(ioapic == -1)) return -1; -- cgit v1.2.3-59-g8ed1b From 5777372af5c929b8f3c706ed7b295b7279537c88 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:10 -0700 Subject: x86, ioapic: Teach mp_register_ioapic to compute a global gsi_end Add the global variable gsi_end and teach mp_register_ioapic to keep it up to date as we add more ioapics into the system. ioapics can only be added early in boot, so the code that runs later can treat gsi_end as a constant. Remove the hacks in sfi.c that second-guess mp_register_ioapic by keeping its own running total of how many GSIs have been seen, and instead use gsi_end. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-9-git-send-email-ebiederm@xmission.com> Signed-off-by: H.
Peter Anvin --- arch/x86/include/asm/io_apic.h | 1 + arch/x86/kernel/apic/io_apic.c | 6 ++++++ arch/x86/kernel/sfi.c | 4 +--- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index feeaf0d92460..37b0f2bb5034 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -184,6 +184,7 @@ struct mp_ioapic_gsi{ u32 gsi_end; }; extern struct mp_ioapic_gsi mp_gsi_routing[]; +extern u32 gsi_end; int mp_find_ioapic(u32 gsi); int mp_find_ioapic_pin(int ioapic, u32 gsi); void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 9ab97622b8e6..f80725571577 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -89,6 +89,9 @@ int nr_ioapics; /* IO APIC gsi routing info */ struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; +/* The last gsi number used */ +u32 gsi_end; + /* MP IRQ source entries */ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; @@ -4312,6 +4315,9 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) mp_gsi_routing[idx].gsi_end = gsi_base + io_apic_get_redir_entries(idx) - 1; + if (mp_gsi_routing[idx].gsi_end > gsi_end) + gsi_end = mp_gsi_routing[idx].gsi_end; + printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c index 34e099382651..7ded57896c0a 100644 --- a/arch/x86/kernel/sfi.c +++ b/arch/x86/kernel/sfi.c @@ -81,7 +81,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table) #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC -static u32 gsi_base; static int __init sfi_parse_ioapic(struct sfi_table_header *table) { @@ -94,8 +93,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table) pentry = (struct sfi_apic_table_entry *)sb->pentry; for (i = 0; i < num; i++) { - mp_register_ioapic(i, pentry->phys_addr, gsi_base); - gsi_base += io_apic_get_redir_entries(i); + mp_register_ioapic(i, pentry->phys_addr, gsi_end + 1); pentry++; } -- cgit v1.2.3-59-g8ed1b From 4afc51a835d3aeba11c35090f524e05c84586d27 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:14 -0700 Subject: x86, ioapic: Simplify probe_nr_irqs_gsi. Use the global gsi_end value now that all ioapics have valid gsi numbers, instead of a combination of acpi_probe_gsi and walking all of the ioapics and counting their number of entries by hand if acpi_probe_gsi gave us an answer we did not like. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-13-git-send-email-ebiederm@xmission.com> Signed-off-by: H.
Peter Anvin --- arch/x86/include/asm/mpspec.h | 6 ------ arch/x86/kernel/acpi/boot.c | 23 ----------------------- arch/x86/kernel/apic/io_apic.c | 17 +++-------------- 3 files changed, 3 insertions(+), 43 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 29994f06c7e2..c82868e9f905 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -105,12 +105,6 @@ extern void mp_config_acpi_legacy_irqs(void); struct device; extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level, int active_high_low); -extern int acpi_probe_gsi(void); -#else /* !CONFIG_ACPI: */ -static inline int acpi_probe_gsi(void) -{ - return 0; -} #endif /* CONFIG_ACPI */ #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3ee92f28a4b2..07a63ce5811a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -876,29 +876,6 @@ static int __init acpi_parse_madt_lapic_entries(void) extern int es7000_plat; #endif -int __init acpi_probe_gsi(void) -{ - int idx; - int gsi; - int max_gsi = 0; - - if (acpi_disabled) - return 0; - - if (!acpi_ioapic) - return 0; - - max_gsi = 0; - for (idx = 0; idx < nr_ioapics; idx++) { - gsi = mp_gsi_routing[idx].gsi_end; - - if (gsi > max_gsi) - max_gsi = gsi; - } - - return max_gsi + 1; -} - static void assign_to_mp_irq(struct mpc_intsrc *m, struct mpc_intsrc *mp_irq) { diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 0d35f46929d1..9f3f6ca86dac 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3850,22 +3850,11 @@ int __init io_apic_get_redir_entries (int ioapic) void __init probe_nr_irqs_gsi(void) { - int nr = 0; + int nr; - nr = acpi_probe_gsi(); - if (nr > nr_irqs_gsi) { + nr = gsi_end + 1; + if (nr > nr_irqs_gsi) nr_irqs_gsi = nr; - } else { - /* for acpi=off or acpi is not compiled in */ - int idx; - - nr = 0; - for (idx = 0; idx < nr_ioapics; idx++) - nr += io_apic_get_redir_entries(idx); - - if (nr > nr_irqs_gsi) - nr_irqs_gsi = nr; - } printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); } -- cgit v1.2.3-59-g8ed1b From 7b20bd5fb902088579af4e70f7f802b93181a628 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:16 -0700 Subject: x86, irq: Kill io_apic_renumber_irq Now that the generic irq layer is performing the exact same remapping as io_apic_renumber_irq we can kill this weird es7000 specific function. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-15-git-send-email-ebiederm@xmission.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/io_apic.h | 1 - arch/x86/kernel/acpi/boot.c | 5 ----- arch/x86/kernel/apic/es7000_32.c | 19 ------------------- arch/x86/kernel/apic/io_apic.c | 6 ------ 4 files changed, 31 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 37b0f2bb5034..9da192a17f0f 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -159,7 +159,6 @@ struct io_apic_irq_attr; extern int io_apic_set_pci_routing(struct device *dev, int irq, struct io_apic_irq_attr *irq_attr); void setup_IO_APIC_irq_extra(u32 gsi); -extern int (*ioapic_renumber_irq)(int ioapic, int irq); extern void ioapic_init_mappings(void); extern void ioapic_insert_resources(void); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 325fbbab7f89..9a5ed58f09dc 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1117,11 +1117,6 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); -#ifdef CONFIG_X86_32 - if (ioapic_renumber_irq) - gsi = ioapic_renumber_irq(ioapic, gsi); -#endif - if (ioapic_pin > MP_MAX_IOAPIC_PIN) { printk(KERN_ERR "Invalid reference to IOAPIC pin " "%d-%d\n", mp_ioapics[ioapic].apicid, diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 03ba1b895f5e..425e53a87feb 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -131,24 +131,6 @@ int es7000_plat; static unsigned int base; -static int -es7000_rename_gsi(int ioapic, int gsi) -{ - if (es7000_plat == ES7000_ZORRO) - return gsi; - - if (!base) { - int i; - for (i = 0; i < nr_ioapics; i++) - base += nr_ioapic_registers[i]; - } - - if (!ioapic && (gsi < 16)) - gsi += base; - - return gsi; -} - static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) { unsigned long vect = 0, psaival = 0; @@ -190,7 +172,6 @@ static void setup_unisys(void) es7000_plat = ES7000_ZORRO; else es7000_plat = ES7000_CLASSIC; - ioapic_renumber_irq = es7000_rename_gsi; } /* diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 594827c3c615..d174d8866547 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1016,7 +1016,6 @@ static inline int irq_trigger(int idx) return MPBIOS_trigger(idx); } -int (*ioapic_renumber_irq)(int ioapic, int irq); static int pin_2_irq(int idx, int apic, int pin) { int irq; @@ -1032,11 +1031,6 @@ static int pin_2_irq(int idx, int apic, int pin) irq = mp_irqs[idx].srcbusirq; } else { u32 gsi = mp_gsi_routing[apic].gsi_base + pin; - /* - * For MPS mode, so far only needed by ES7000 platform - */ - if (ioapic_renumber_irq) - gsi = ioapic_renumber_irq(apic, gsi); if (gsi >= NR_IRQS_LEGACY) irq = gsi; -- cgit v1.2.3-59-g8ed1b From 4f47b4c9f0b711bf84adb8c27774ae80d346b628 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 5 May 2010 13:22:25 -0700 Subject: x86, acpi/irq: Define gsi_end when X86_IO_APIC is undefined My recent changes introducing a global gsi_end variable failed to take into account the case of using acpi on a system not built to support IO_APICs, causing the build to fail. Define gsi_end to 15 when CONFIG_X86_IO_APIC is not set to avoid compile errors. Signed-off-by: Eric W. 
Biederman Cc: Yinghai Lu LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 9da192a17f0f..63cb4096c3dc 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -197,6 +197,7 @@ static const int timer_through_8259 = 0; static inline void ioapic_init_mappings(void) { } static inline void ioapic_insert_resources(void) { } static inline void probe_nr_irqs_gsi(void) { } +#define gsi_end (NR_IRQS_LEGACY - 1) static inline int mp_find_ioapic(u32 gsi) { return 0; } struct io_apic_irq_attr; -- cgit v1.2.3-59-g8ed1b From a2a47c6c3d1a7c01da4464b3b7be93b924f874c1 Mon Sep 17 00:00:00 2001 From: Ky Srinivasan Date: Thu, 6 May 2010 12:08:41 -0700 Subject: x86: Detect running on a Microsoft HyperV system This patch integrates HyperV detection within the framework currently used by VmWare. With this patch, we can avoid having to replicate the HyperV detection code in each of the Microsoft HyperV drivers. Reworked and tweaked by Greg K-H to build properly. Signed-off-by: K. Y. Srinivasan LKML-Reference: <20100506190841.GA1605@kroah.com> Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Vadim Rozenfeld Cc: Avi Kivity Cc: Gleb Natapov Cc: Frederic Weisbecker Cc: Alexey Dobriyan Cc: "K.Prasad" Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Paul Mackerras Cc: Alan Cox Cc: Haiyang Zhang Cc: Hank Janssen Signed-off-by: Greg Kroah-Hartman Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/hyperv.h | 6 ++++ arch/x86/include/asm/mshyperv.h | 7 +++++ arch/x86/include/asm/processor.h | 3 ++ arch/x86/kernel/cpu/Makefile | 2 +- arch/x86/kernel/cpu/hypervisor.c | 10 ++++-- arch/x86/kernel/cpu/mshyperv.c | 67 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 91 insertions(+), 4 deletions(-) create mode 100644 arch/x86/include/asm/mshyperv.h create mode 100644 arch/x86/kernel/cpu/mshyperv.c (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h index e153a2b3889a..46040473e122 100644 --- a/arch/x86/include/asm/hyperv.h +++ b/arch/x86/include/asm/hyperv.h @@ -14,6 +14,9 @@ #define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 #define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 +#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000 +#define HYPERV_CPUID_MIN 0x40000005 + /* * Feature identification. EAX indicates which features are available * to the partition based upon the current partition privileges. 
@@ -129,6 +132,9 @@ /* MSR used to provide vcpu index */ #define HV_X64_MSR_VP_INDEX 0x40000002 +/* MSR used to read the per-partition time reference counter */ +#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 + /* Define the virtual APIC registers */ #define HV_X64_MSR_EOI 0x40000070 #define HV_X64_MSR_ICR 0x40000071 diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h new file mode 100644 index 000000000000..6cd8101d1344 --- /dev/null +++ b/arch/x86/include/asm/mshyperv.h @@ -0,0 +1,7 @@ +#ifndef ASM_X86__MSHYPER_H +#define ASM_X86__MSHYPER_H + +int ms_hyperv_platform(void); +void __cpuinit ms_hyperv_set_feature_bits(struct cpuinfo_x86 *c); + +#endif diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index b753ea59703a..597c041bd124 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -114,6 +114,8 @@ struct cpuinfo_x86 { u16 cpu_index; #endif unsigned int x86_hyper_vendor; + /* The layout of this field is hypervisor specific */ + unsigned int x86_hyper_features; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 @@ -129,6 +131,7 @@ struct cpuinfo_x86 { #define X86_HYPER_VENDOR_NONE 0 #define X86_HYPER_VENDOR_VMWARE 1 +#define X86_HYPER_VENDOR_MSFT 2 /* * capabilities of CPUs diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index c202b62f3671..3a785da34b6f 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp) obj-y := intel_cacheinfo.o addon_cpuid_features.o obj-y += proc.o capflags.o powerflags.o common.o -obj-y += vmware.o hypervisor.o sched.o +obj-y += vmware.o hypervisor.o sched.o mshyperv.o obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o obj-$(CONFIG_X86_64) += bugs_64.o diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 08be922de33a..de3f4e0ce8eb 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -23,6 +23,7 @@ #include #include +#include #include static inline void __cpuinit @@ -30,6 +31,8 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c) { if (vmware_platform()) c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; + else if (ms_hyperv_platform()) + c->x86_hyper_vendor = X86_HYPER_VENDOR_MSFT; else c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; } @@ -37,10 +40,11 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c) static inline void __cpuinit hypervisor_set_feature_bits(struct cpuinfo_x86 *c) { - if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) { + if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) vmware_set_feature_bits(c); - return; - } + else if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_MSFT) + ms_hyperv_set_feature_bits(c); + return; } void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c new file mode 100644 index 000000000000..2443b61cdb17 --- /dev/null +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -0,0 +1,67 @@ +/* + * HyperV Detection code. + * + * Copyright (C) 2010, Novell, Inc. + * Author : K. Y. Srinivasan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include +#include +#include +#include + + +int ms_hyperv_platform(void) +{ + u32 eax, ebx, ecx, edx; + char hyp_signature[13]; + + cpuid(1, &eax, &ebx, &ecx, &edx); + if (!(ecx & HYPERV_HYPERVISOR_PRESENT_BIT)) + return 0; + + cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &ebx, &ecx, &edx); + *(u32 *)(hyp_signature + 0) = ebx; + *(u32 *)(hyp_signature + 4) = ecx; + *(u32 *)(hyp_signature + 8) = edx; + + if ((eax < HYPERV_CPUID_MIN) || (memcmp("Microsoft Hv", hyp_signature, 12))) + return 0; + return 1; +} + +void __cpuinit ms_hyperv_set_feature_bits(struct cpuinfo_x86 *c) +{ + u32 eax, ebx, ecx, edx; + + c->x86_hyper_features = 0; + /* + * Extract the features, recommendations etc. + * The first 9 bits will be used to track hypervisor features. + * The next 6 bits will be used to track the hypervisor + * recommendations. + */ + cpuid(HYPERV_CPUID_FEATURES, &eax, &ebx, &ecx, &edx); + c->x86_hyper_features |= (eax & 0x1ff); + + cpuid(HYPERV_CPUID_ENLIGHTMENT_INFO, &eax, &ebx, &ecx, &edx); + c->x86_hyper_features |= ((eax & 0x3f) << 9); + printk(KERN_INFO "Detected HyperV with features: %x\n", + c->x86_hyper_features); +} -- cgit v1.2.3-59-g8ed1b From e08cae4181af9483b04ecfac48f01c8e5a5f27bf Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 7 May 2010 16:57:28 -0700 Subject: x86: Clean up the hypervisor layer Clean up the hypervisor layer and the hypervisor drivers, using an ops structure instead of an enumeration with if statements. The identity of the hypervisor, if needed, can be tested by testing the pointer value in x86_hyper. The MS-HyperV private state is moved into a normal global variable (it's per-system state, not per-CPU state). Being a normal bss variable, it will be left at all zero on non-HyperV platforms, and so can generally be tested for HyperV-specific features without additional qualification. Signed-off-by: H. 
Peter Anvin Acked-by: Greg KH Cc: Hank Janssen Cc: Alok Kataria Cc: Ky Srinivasan LKML-Reference: <4BE49778.6060800@zytor.com> --- arch/x86/include/asm/hyperv.h | 5 ++-- arch/x86/include/asm/hypervisor.h | 27 +++++++++++++++++-- arch/x86/include/asm/mshyperv.h | 15 ++++++++--- arch/x86/include/asm/processor.h | 7 ----- arch/x86/include/asm/vmware.h | 27 ------------------- arch/x86/kernel/cpu/hypervisor.c | 56 ++++++++++++++++++++++++--------------- arch/x86/kernel/cpu/mshyperv.c | 51 +++++++++++++++++------------------ arch/x86/kernel/cpu/vmware.c | 36 ++++++++++++++----------- 8 files changed, 117 insertions(+), 107 deletions(-) delete mode 100644 arch/x86/include/asm/vmware.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h index 46040473e122..5df477ac3af7 100644 --- a/arch/x86/include/asm/hyperv.h +++ b/arch/x86/include/asm/hyperv.h @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_KVM_HYPERV_H -#define _ASM_X86_KVM_HYPERV_H +#ifndef _ASM_X86_HYPERV_H +#define _ASM_X86_HYPERV_H #include @@ -16,6 +16,7 @@ #define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000 #define HYPERV_CPUID_MIN 0x40000005 +#define HYPERV_CPUID_MAX 0x4000ffff /* * Feature identification. EAX indicates which features are available diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index b78c0941e422..70abda7058c8 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -17,10 +17,33 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ -#ifndef ASM_X86__HYPERVISOR_H -#define ASM_X86__HYPERVISOR_H +#ifndef _ASM_X86_HYPERVISOR_H +#define _ASM_X86_HYPERVISOR_H extern void init_hypervisor(struct cpuinfo_x86 *c); extern void init_hypervisor_platform(void); +/* + * x86 hypervisor information + */ +struct hypervisor_x86 { + /* Hypervisor name */ + const char *name; + + /* Detection routine */ + bool (*detect)(void); + + /* Adjust CPU feature bits (run once per CPU) */ + void (*set_cpu_features)(struct cpuinfo_x86 *); + + /* Platform setup (run once per boot) */ + void (*init_platform)(void); +}; + +extern const struct hypervisor_x86 *x86_hyper; + +/* Recognized hypervisors */ +extern const struct hypervisor_x86 x86_hyper_vmware; +extern const struct hypervisor_x86 x86_hyper_ms_hyperv; + #endif diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 6cd8101d1344..79ce5685ab64 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -1,7 +1,14 @@ -#ifndef ASM_X86__MSHYPER_H -#define ASM_X86__MSHYPER_H +#ifndef _ASM_X86_MSHYPER_H +#define _ASM_X86_MSHYPER_H -int ms_hyperv_platform(void); -void __cpuinit ms_hyperv_set_feature_bits(struct cpuinfo_x86 *c); +#include +#include + +struct ms_hyperv_info { + u32 features; + u32 hints; +}; + +extern struct ms_hyperv_info ms_hyperv; #endif diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 597c041bd124..e4f1dfb2d05b 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -113,9 +113,6 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; #endif - unsigned int x86_hyper_vendor; - /* The layout of this field is hypervisor specific */ - unsigned int x86_hyper_features; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 @@ -129,10 +126,6 @@ struct cpuinfo_x86 { #define X86_VENDOR_UNKNOWN 0xff -#define X86_HYPER_VENDOR_NONE 0 -#define X86_HYPER_VENDOR_VMWARE 1 -#define X86_HYPER_VENDOR_MSFT 2 - /* 
* capabilities of CPUs */ diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h deleted file mode 100644 index e49ed6d2fd4e..000000000000 --- a/arch/x86/include/asm/vmware.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2008, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - */ -#ifndef ASM_X86__VMWARE_H -#define ASM_X86__VMWARE_H - -extern void vmware_platform_setup(void); -extern int vmware_platform(void); -extern void vmware_set_feature_bits(struct cpuinfo_x86 *c); - -#endif diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index de3f4e0ce8eb..87381759d3cb 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -22,40 +22,52 @@ */ #include -#include -#include #include -static inline void __cpuinit -detect_hypervisor_vendor(struct cpuinfo_x86 *c) +/* + * Hypervisor detect order. This is specified explicitly here because + * some hypervisors might implement compatibility modes for other + * hypervisors and therefore need to be detected in specific sequence. 
+ */ +static const __initconst struct hypervisor_x86 * const hypervisors[] = { - if (vmware_platform()) - c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; - else if (ms_hyperv_platform()) - c->x86_hyper_vendor = X86_HYPER_VENDOR_MSFT; - else - c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; -} + &x86_hyper_vmware, + &x86_hyper_ms_hyperv, +}; -static inline void __cpuinit -hypervisor_set_feature_bits(struct cpuinfo_x86 *c) +const struct hypervisor_x86 *x86_hyper; + +static inline void __init +detect_hypervisor_vendor(void) { - if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) - vmware_set_feature_bits(c); - else if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_MSFT) - ms_hyperv_set_feature_bits(c); - return; + const struct hypervisor_x86 *h, * const *p; + + for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { + h = *p; + if (h->detect()) { + x86_hyper = h; + printk(KERN_INFO "Hypervisor detected: %s\n", h->name); + break; + } + } } void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) { - detect_hypervisor_vendor(c); - hypervisor_set_feature_bits(c); + if (x86_hyper && x86_hyper->set_cpu_features) + x86_hyper->set_cpu_features(c); } void __init init_hypervisor_platform(void) { + + detect_hypervisor_vendor(); + + if (!x86_hyper) + return; + init_hypervisor(&boot_cpu_data); - if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) - vmware_platform_setup(); + + if (x86_hyper->init_platform) + x86_hyper->init_platform(); } diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a58d8e64fc7c..5969c3ee3186 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -12,45 +12,42 @@ #include #include +#include #include #include +struct ms_hyperv_info ms_hyperv; -int ms_hyperv_platform(void) +static bool __init ms_hyperv_platform(void) { - u32 eax, ebx, ecx, edx; - char hyp_signature[13]; + u32 eax; + u32 hyp_signature[3]; - cpuid(1, &eax, &ebx, &ecx, &edx); - if (!(ecx & HYPERV_HYPERVISOR_PRESENT_BIT)) - return 0; + if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return false; - cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &ebx, &ecx, &edx); - *(u32 *)(hyp_signature + 0) = ebx; - *(u32 *)(hyp_signature + 4) = ecx; - *(u32 *)(hyp_signature + 8) = edx; + cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, + &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); - if ((eax < HYPERV_CPUID_MIN) || (memcmp("Microsoft Hv", hyp_signature, 12))) - return 0; - return 1; + return eax >= HYPERV_CPUID_MIN && + eax <= HYPERV_CPUID_MAX && + !memcmp("Microsoft Hv", hyp_signature, 12); } -void __cpuinit ms_hyperv_set_feature_bits(struct cpuinfo_x86 *c) +static void __init ms_hyperv_init_platform(void) { - u32 eax, ebx, ecx, edx; - - c->x86_hyper_features = 0; /* - * Extract the features, recommendations etc. - * The first 9 bits will be used to track hypervisor features. - * The next 6 bits will be used to track the hypervisor - * recommendations. 
+ * Extract the features and hints */ - cpuid(HYPERV_CPUID_FEATURES, &eax, &ebx, &ecx, &edx); - c->x86_hyper_features |= (eax & 0x1ff); + ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); + ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); - cpuid(HYPERV_CPUID_ENLIGHTMENT_INFO, &eax, &ebx, &ecx, &edx); - c->x86_hyper_features |= ((eax & 0x3f) << 9); - printk(KERN_INFO "Detected HyperV with features: %x\n", - c->x86_hyper_features); + printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", + ms_hyperv.features, ms_hyperv.hints); } + +const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { + .name = "Microsoft HyperV", + .detect = ms_hyperv_platform, + .init_platform = ms_hyperv_init_platform, +}; diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 1cbed97b59cf..46a5b5d3ba5e 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -23,8 +23,8 @@ #include #include -#include #include +#include #define CPUID_VMWARE_INFO_LEAF 0x40000000 #define VMWARE_HYPERVISOR_MAGIC 0x564D5868 @@ -64,7 +64,7 @@ static unsigned long vmware_get_tsc_khz(void) return tsc_hz; } -void __init vmware_platform_setup(void) +static void __init vmware_platform_setup(void) { uint32_t eax, ebx, ecx, edx; @@ -82,24 +82,21 @@ void __init vmware_platform_setup(void) * serial key should be enough, as this will always have a VMware * specific string when running under VMware hypervisor. */ -int vmware_platform(void) +static bool __init vmware_platform(void) { if (cpu_has_hypervisor) { - unsigned int eax, ebx, ecx, edx; - char hyper_vendor_id[13]; - - cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx); - memcpy(hyper_vendor_id + 0, &ebx, 4); - memcpy(hyper_vendor_id + 4, &ecx, 4); - memcpy(hyper_vendor_id + 8, &edx, 4); - hyper_vendor_id[12] = '\0'; - if (!strcmp(hyper_vendor_id, "VMwareVMware")) - return 1; + unsigned int eax; + unsigned int hyper_vendor_id[3]; + + cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], + &hyper_vendor_id[1], &hyper_vendor_id[2]); + if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) + return true; } else if (dmi_available && dmi_name_in_serial("VMware") && __vmware_platform()) - return 1; + return true; - return 0; + return false; } /* @@ -114,8 +111,15 @@ int vmware_platform(void) * so that the kernel could just trust the hypervisor with providing a * reliable virtual TSC that is suitable for timekeeping. */ -void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c) +static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); } + +const __refconst struct hypervisor_x86 x86_hyper_vmware = { + .name = "VMware", + .detect = vmware_platform, + .set_cpu_features = vmware_set_cpu_features, + .init_platform = vmware_platform_setup, +}; -- cgit v1.2.3-59-g8ed1b From c9ad488289144ae5ef53b012e15895ef1f5e4bb6 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 6 May 2010 11:45:45 +0300 Subject: x86: Eliminate TS_XSAVE The fpu code currently uses current->thread_info->status & TS_XSAVE as a way to distinguish between XSAVE capable processors and older processors. The decision is not really task specific; instead we use the task status to avoid a global memory reference - the value should be the same across all threads. Eliminate this tie-in into the task structure by using an alternative instruction keyed off the XSAVE cpu feature; this results in shorter and faster code, without introducing a global memory reference. 
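For illustration, the same alternatives-based test works for any CPU feature bit: at boot, apply_alternatives() patches the first instruction with the second on CPUs that advertise the feature, so the check costs one register move and touches neither task state nor a global. A hypothetical helper keyed off X86_FEATURE_FXSR, following the same pattern as the use_xsave() added below, might look like:

static inline bool use_fxsr(void)
{
	u8 has_fxsr;

	/* "mov $0" is the compile-time default; boot-time patching
	 * substitutes "mov $1" when the CPU sets X86_FEATURE_FXSR. */
	alternative_io("mov $0, %0",
		       "mov $1, %0",
		       X86_FEATURE_FXSR,
		       "=g"(has_fxsr));

	return has_fxsr;
}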
[ hpa: in the future, this probably should use an asm jmp ] Signed-off-by: Avi Kivity Acked-by: Suresh Siddha LKML-Reference: <1273135546-29690-2-git-send-email-avi@redhat.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/i387.h | 20 ++++++++++++++++---- arch/x86/include/asm/thread_info.h | 1 - arch/x86/kernel/cpu/common.c | 5 +---- arch/x86/kernel/i387.c | 5 +---- arch/x86/kernel/xsave.c | 6 +++--- 5 files changed, 21 insertions(+), 16 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index da2930924501..a301a6825c3a 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -56,6 +56,18 @@ extern int restore_i387_xstate_ia32(void __user *buf); #define X87_FSW_ES (1 << 7) /* Exception Summary */ +static inline bool use_xsave(void) +{ + u8 has_xsave; + + alternative_io("mov $0, %0", + "mov $1, %0", + X86_FEATURE_XSAVE, + "=g"(has_xsave)); + + return has_xsave; +} + #ifdef CONFIG_X86_64 /* Ignore delayed exceptions from user space */ @@ -99,7 +111,7 @@ static inline void clear_fpu_state(struct task_struct *tsk) /* * xsave header may indicate the init state of the FP. */ - if ((task_thread_info(tsk)->status & TS_XSAVE) && + if (use_xsave() && !(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) return; @@ -164,7 +176,7 @@ static inline void fxsave(struct task_struct *tsk) static inline void __save_init_fpu(struct task_struct *tsk) { - if (task_thread_info(tsk)->status & TS_XSAVE) + if (use_xsave()) xsave(tsk); else fxsave(tsk); @@ -218,7 +230,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) */ static inline void __save_init_fpu(struct task_struct *tsk) { - if (task_thread_info(tsk)->status & TS_XSAVE) { + if (use_xsave()) { struct xsave_struct *xstate = &tsk->thread.xstate->xsave; struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; @@ -266,7 +278,7 @@ end: static inline int restore_fpu_checking(struct task_struct *tsk) { - if (task_thread_info(tsk)->status & TS_XSAVE) + if (use_xsave()) return xrstor_checking(&tsk->thread.xstate->xsave); else return fxrstor_checking(&tsk->thread.xstate->fxsave); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e0d28901e969..e9e341505ab3 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -244,7 +244,6 @@ static inline struct thread_info *current_thread_info(void) #define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */ #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ -#define TS_XSAVE 0x0010 /* Use xsave/xrstor */ #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4868e4a951ee..c1c00d0b1692 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1243,10 +1243,7 @@ void __cpuinit cpu_init(void) /* * Force FPU initialization: */ - if (cpu_has_xsave) - current_thread_info()->status = TS_XSAVE; - else - current_thread_info()->status = 0; + current_thread_info()->status = 0; clear_used_math(); mxcsr_feature_mask_init(); diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 54c31c285488..14ca1dc7a703 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -102,10 +102,7 @@ void __cpuinit fpu_init(void) mxcsr_feature_mask_init(); /* clean state in init */ - if (cpu_has_xsave) - current_thread_info()->status = TS_XSAVE; - else - current_thread_info()->status = 0; + 
current_thread_info()->status = 0; clear_used_math(); } #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 782c3a362ec6..c1b0a11033a2 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -99,7 +99,7 @@ int save_i387_xstate(void __user *buf) if (err) return err; - if (task_thread_info(tsk)->status & TS_XSAVE) + if (use_xsave()) err = xsave_user(buf); else err = fxsave_user(buf); @@ -116,7 +116,7 @@ int save_i387_xstate(void __user *buf) clear_used_math(); /* trigger finit */ - if (task_thread_info(tsk)->status & TS_XSAVE) { + if (use_xsave()) { struct _fpstate __user *fx = buf; struct _xstate __user *x = buf; u64 xstate_bv; @@ -225,7 +225,7 @@ int restore_i387_xstate(void __user *buf) clts(); task_thread_info(current)->status |= TS_USEDFPU; } - if (task_thread_info(tsk)->status & TS_XSAVE) + if (use_xsave()) err = restore_user_xstate(buf); else err = fxrstor_checking((__force struct i387_fxsave_struct *) -- cgit v1.2.3-59-g8ed1b From 86603283326c9e95e5ad4e9fdddeec93cac5d9ad Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 6 May 2010 11:45:46 +0300 Subject: x86: Introduce 'struct fpu' and related API Currently all fpu state access is through tsk->thread.xstate. Since we wish to generalize fpu access to non-task contexts, wrap the state in a new 'struct fpu' and convert existing access to use an fpu API. Signal frame handlers are not converted to the API since they will remain task context only things. Signed-off-by: Avi Kivity Acked-by: Suresh Siddha LKML-Reference: <1273135546-29690-3-git-send-email-avi@redhat.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/i387.h | 115 +++++++++++++++++++++++++++++---------- arch/x86/include/asm/processor.h | 6 +- arch/x86/include/asm/xsave.h | 7 ++- arch/x86/kernel/i387.c | 102 +++++++++++++++++----------------- arch/x86/kernel/process.c | 21 +++---- arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- arch/x86/kernel/xsave.c | 2 +- arch/x86/math-emu/fpu_aux.c | 6 +- 9 files changed, 160 insertions(+), 103 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index a301a6825c3a..1a8cca33b736 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -103,10 +104,10 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) values. The kernel data segment can be sometimes 0 and sometimes new user value. Both should be ok. Use the PDA as safe address because it should be already in L1. */ -static inline void clear_fpu_state(struct task_struct *tsk) +static inline void fpu_clear(struct fpu *fpu) { - struct xsave_struct *xstate = &tsk->thread.xstate->xsave; - struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; + struct xsave_struct *xstate = &fpu->state->xsave; + struct i387_fxsave_struct *fx = &fpu->state->fxsave; /* * xsave header may indicate the init state of the FP. 
@@ -123,6 +124,11 @@ static inline void clear_fpu_state(struct task_struct *tsk) X86_FEATURE_FXSAVE_LEAK); } +static inline void clear_fpu_state(struct task_struct *tsk) +{ + fpu_clear(&tsk->thread.fpu); +} + static inline int fxsave_user(struct i387_fxsave_struct __user *fx) { int err; @@ -147,7 +153,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) return err; } -static inline void fxsave(struct task_struct *tsk) +static inline void fpu_fxsave(struct fpu *fpu) { /* Using "rex64; fxsave %0" is broken because, if the memory operand uses any extended registers for addressing, a second REX prefix @@ -157,42 +163,45 @@ static inline void fxsave(struct task_struct *tsk) /* Using "fxsaveq %0" would be the ideal choice, but is only supported starting with gas 2.16. */ __asm__ __volatile__("fxsaveq %0" - : "=m" (tsk->thread.xstate->fxsave)); + : "=m" (fpu->state->fxsave)); #elif 0 /* Using, as a workaround, the properly prefixed form below isn't accepted by any binutils version so far released, complaining that the same type of prefix is used twice if an extended register is needed for addressing (fix submitted to mainline 2005-11-21). */ __asm__ __volatile__("rex64/fxsave %0" - : "=m" (tsk->thread.xstate->fxsave)); + : "=m" (fpu->state->fxsave)); #else /* This, however, we can work around by forcing the compiler to select an addressing mode that doesn't require extended registers. */ __asm__ __volatile__("rex64/fxsave (%1)" - : "=m" (tsk->thread.xstate->fxsave) - : "cdaSDb" (&tsk->thread.xstate->fxsave)); + : "=m" (fpu->state->fxsave) + : "cdaSDb" (&fpu->state->fxsave)); #endif } -static inline void __save_init_fpu(struct task_struct *tsk) +static inline void fpu_save_init(struct fpu *fpu) { if (use_xsave()) - xsave(tsk); + fpu_xsave(fpu); else - fxsave(tsk); + fpu_fxsave(fpu); - clear_fpu_state(tsk); + fpu_clear(fpu); +} + +static inline void __save_init_fpu(struct task_struct *tsk) +{ + fpu_save_init(&tsk->thread.fpu); task_thread_info(tsk)->status &= ~TS_USEDFPU; } #else /* CONFIG_X86_32 */ #ifdef CONFIG_MATH_EMULATION -extern void finit_task(struct task_struct *tsk); +extern void finit_soft_fpu(struct i387_soft_struct *soft); #else -static inline void finit_task(struct task_struct *tsk) -{ -} +static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} #endif static inline void tolerant_fwait(void) @@ -228,13 +237,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) /* * These must be called with preempt disabled */ -static inline void __save_init_fpu(struct task_struct *tsk) +static inline void fpu_save_init(struct fpu *fpu) { if (use_xsave()) { - struct xsave_struct *xstate = &tsk->thread.xstate->xsave; - struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; + struct xsave_struct *xstate = &fpu->state->xsave; + struct i387_fxsave_struct *fx = &fpu->state->fxsave; - xsave(tsk); + fpu_xsave(fpu); /* * xsave header may indicate the init state of the FP. @@ -258,8 +267,8 @@ static inline void __save_init_fpu(struct task_struct *tsk) "fxsave %[fx]\n" "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", X86_FEATURE_FXSR, - [fx] "m" (tsk->thread.xstate->fxsave), - [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); + [fx] "m" (fpu->state->fxsave), + [fsw] "m" (fpu->state->fxsave.swd) : "memory"); clear_state: /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. 
Clear the x87 state here by setting it to fixed @@ -271,17 +280,34 @@ clear_state: X86_FEATURE_FXSAVE_LEAK, [addr] "m" (safe_address)); end: + ; +} + +static inline void __save_init_fpu(struct task_struct *tsk) +{ + fpu_save_init(&tsk->thread.fpu); task_thread_info(tsk)->status &= ~TS_USEDFPU; } + #endif /* CONFIG_X86_64 */ -static inline int restore_fpu_checking(struct task_struct *tsk) +static inline int fpu_fxrstor_checking(struct fpu *fpu) +{ + return fxrstor_checking(&fpu->state->fxsave); +} + +static inline int fpu_restore_checking(struct fpu *fpu) { if (use_xsave()) - return xrstor_checking(&tsk->thread.xstate->xsave); + return fpu_xrstor_checking(fpu); else - return fxrstor_checking(&tsk->thread.xstate->fxsave); + return fpu_fxrstor_checking(fpu); +} + +static inline int restore_fpu_checking(struct task_struct *tsk) +{ + return fpu_restore_checking(&tsk->thread.fpu); } /* @@ -409,30 +435,59 @@ static inline void clear_fpu(struct task_struct *tsk) static inline unsigned short get_fpu_cwd(struct task_struct *tsk) { if (cpu_has_fxsr) { - return tsk->thread.xstate->fxsave.cwd; + return tsk->thread.fpu.state->fxsave.cwd; } else { - return (unsigned short)tsk->thread.xstate->fsave.cwd; + return (unsigned short)tsk->thread.fpu.state->fsave.cwd; } } static inline unsigned short get_fpu_swd(struct task_struct *tsk) { if (cpu_has_fxsr) { - return tsk->thread.xstate->fxsave.swd; + return tsk->thread.fpu.state->fxsave.swd; } else { - return (unsigned short)tsk->thread.xstate->fsave.swd; + return (unsigned short)tsk->thread.fpu.state->fsave.swd; } } static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) { if (cpu_has_xmm) { - return tsk->thread.xstate->fxsave.mxcsr; + return tsk->thread.fpu.state->fxsave.mxcsr; } else { return MXCSR_DEFAULT; } } +static bool fpu_allocated(struct fpu *fpu) +{ + return fpu->state != NULL; +} + +static inline int fpu_alloc(struct fpu *fpu) +{ + if (fpu_allocated(fpu)) + return 0; + fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL); + if (!fpu->state) + return -ENOMEM; + WARN_ON((unsigned long)fpu->state & 15); + return 0; +} + +static inline void fpu_free(struct fpu *fpu) +{ + if (fpu->state) { + kmem_cache_free(task_xstate_cachep, fpu->state); + fpu->state = NULL; + } +} + +static inline void fpu_copy(struct fpu *dst, struct fpu *src) +{ + memcpy(dst->state, src->state, xstate_size); +} + #endif /* __ASSEMBLY__ */ #define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index b753ea59703a..b684f587647c 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -380,6 +380,10 @@ union thread_xstate { struct xsave_struct xsave; }; +struct fpu { + union thread_xstate *state; +}; + #ifdef CONFIG_X86_64 DECLARE_PER_CPU(struct orig_ist, orig_ist); @@ -457,7 +461,7 @@ struct thread_struct { unsigned long trap_no; unsigned long error_code; /* floating point and extended processor state */ - union thread_xstate *xstate; + struct fpu fpu; #ifdef CONFIG_X86_32 /* Virtual 86 mode info */ struct vm86_struct __user *vm86_info; diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index ddc04ccad03b..2c4390cae228 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf, void __user *fpstate, struct _fpx_sw_bytes *sw); -static inline int xrstor_checking(struct xsave_struct *fx) +static inline int fpu_xrstor_checking(struct 
fpu *fpu) { + struct xsave_struct *fx = &fpu->state->xsave; int err; asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" @@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask) : "memory"); } -static inline void xsave(struct task_struct *tsk) +static inline void fpu_xsave(struct fpu *fpu) { /* This, however, we can work around by forcing the compiler to select an addressing mode that doesn't require extended registers. */ __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" - : : "D" (&(tsk->thread.xstate->xsave)), + : : "D" (&(fpu->state->xsave)), "a" (-1), "d"(-1) : "memory"); } #endif diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 14ca1dc7a703..86cef6b32253 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -107,57 +107,57 @@ void __cpuinit fpu_init(void) } #endif /* CONFIG_X86_64 */ -/* - * The _current_ task is using the FPU for the first time - * so initialize it and set the mxcsr to its default - * value at reset if we support XMM instructions and then - * remeber the current task has used the FPU. - */ -int init_fpu(struct task_struct *tsk) +static void fpu_finit(struct fpu *fpu) { - if (tsk_used_math(tsk)) { - if (HAVE_HWFP && tsk == current) - unlazy_fpu(tsk); - return 0; - } - - /* - * Memory allocation at the first usage of the FPU and other state. - */ - if (!tsk->thread.xstate) { - tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep, - GFP_KERNEL); - if (!tsk->thread.xstate) - return -ENOMEM; - } - #ifdef CONFIG_X86_32 if (!HAVE_HWFP) { - memset(tsk->thread.xstate, 0, xstate_size); - finit_task(tsk); - set_stopped_child_used_math(tsk); - return 0; + finit_soft_fpu(&fpu->state->soft); + return; } #endif if (cpu_has_fxsr) { - struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; + struct i387_fxsave_struct *fx = &fpu->state->fxsave; memset(fx, 0, xstate_size); fx->cwd = 0x37f; if (cpu_has_xmm) fx->mxcsr = MXCSR_DEFAULT; } else { - struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; + struct i387_fsave_struct *fp = &fpu->state->fsave; memset(fp, 0, xstate_size); fp->cwd = 0xffff037fu; fp->swd = 0xffff0000u; fp->twd = 0xffffffffu; fp->fos = 0xffff0000u; } +} + +/* + * The _current_ task is using the FPU for the first time + * so initialize it and set the mxcsr to its default + * value at reset if we support XMM instructions and then + * remeber the current task has used the FPU. + */ +int init_fpu(struct task_struct *tsk) +{ + int ret; + + if (tsk_used_math(tsk)) { + if (HAVE_HWFP && tsk == current) + unlazy_fpu(tsk); + return 0; + } + /* - * Only the device not available exception or ptrace can call init_fpu. + * Memory allocation at the first usage of the FPU and other state. */ + ret = fpu_alloc(&tsk->thread.fpu); + if (ret) + return ret; + + fpu_finit(&tsk->thread.fpu); + set_stopped_child_used_math(tsk); return 0; } @@ -191,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, return ret; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.xstate->fxsave, 0, -1); + &target->thread.fpu.state->fxsave, 0, -1); } int xfpregs_set(struct task_struct *target, const struct user_regset *regset, @@ -208,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.xstate->fxsave, 0, -1); + &target->thread.fpu.state->fxsave, 0, -1); /* * mxcsr reserved bits must be masked to zero for security reasons. 
*/ - target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; + target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; /* * update the header bits in the xsave header, indicating the * presence of FP and SSE state. */ if (cpu_has_xsave) - target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; + target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; return ret; } @@ -243,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, * memory layout in the thread struct, so that we can copy the entire * xstateregs to the user using one user_regset_copyout(). */ - memcpy(&target->thread.xstate->fxsave.sw_reserved, + memcpy(&target->thread.fpu.state->fxsave.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); /* * Copy the xstate memory layout. */ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.xstate->xsave, 0, -1); + &target->thread.fpu.state->xsave, 0, -1); return ret; } @@ -269,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.xstate->xsave, 0, -1); + &target->thread.fpu.state->xsave, 0, -1); /* * mxcsr reserved bits must be masked to zero for security reasons. */ - target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; + target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; - xsave_hdr = &target->thread.xstate->xsave.xsave_hdr; + xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr; xsave_hdr->xstate_bv &= pcntxt_mask; /* @@ -362,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) static void convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { - struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave; + struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; int i; @@ -402,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk, const struct user_i387_ia32_struct *env) { - struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave; + struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; int i; @@ -442,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, if (!cpu_has_fxsr) { return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.xstate->fsave, 0, + &target->thread.fpu.state->fsave, 0, -1); } @@ -472,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, if (!cpu_has_fxsr) { return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.xstate->fsave, 0, -1); + &target->thread.fpu.state->fsave, 0, -1); } if (pos > 0 || count < sizeof(env)) @@ -487,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, * presence of FP. 
*/ if (cpu_has_xsave) - target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; + target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; return ret; } @@ -498,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) { struct task_struct *tsk = current; - struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; + struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave; fp->status = fp->swd; if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) @@ -509,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) { struct task_struct *tsk = current; - struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; + struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; struct user_i387_ia32_struct env; int err = 0; @@ -544,7 +544,7 @@ static int save_i387_xsave(void __user *buf) * header as well as change any contents in the memory layout. * xrestore as part of sigreturn will capture all the changes. */ - tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; + tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; if (save_i387_fxsave(fx) < 0) return -1; @@ -596,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) { struct task_struct *tsk = current; - return __copy_from_user(&tsk->thread.xstate->fsave, buf, + return __copy_from_user(&tsk->thread.fpu.state->fsave, buf, sizeof(struct i387_fsave_struct)); } @@ -607,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, struct user_i387_ia32_struct env; int err; - err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], + err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0], size); /* mxcsr reserved bits must be masked to zero for security reasons */ - tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; + tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; if (err || __copy_from_user(&env, buf, sizeof(env))) return 1; convert_to_fxsr(tsk, &env); @@ -626,7 +626,7 @@ static int restore_i387_xsave(void __user *buf) struct i387_fxsave_struct __user *fx = (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; struct xsave_hdr_struct *xsave_hdr = - &current->thread.xstate->xsave.xsave_hdr; + &current->thread.fpu.state->xsave.xsave_hdr; u64 mask; int err; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 28ad9f4d8b94..f18fd9c15247 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -32,25 +32,22 @@ struct kmem_cache *task_xstate_cachep; int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { + int ret; + *dst = *src; - if (src->thread.xstate) { - dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, - GFP_KERNEL); - if (!dst->thread.xstate) - return -ENOMEM; - WARN_ON((unsigned long)dst->thread.xstate & 15); - memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); + if (fpu_allocated(&src->thread.fpu)) { + memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu)); + ret = fpu_alloc(&dst->thread.fpu); + if (ret) + return ret; + fpu_copy(&dst->thread.fpu, &src->thread.fpu); } return 0; } void free_thread_xstate(struct task_struct *tsk) { - if (tsk->thread.xstate) { - kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); - tsk->thread.xstate = NULL; - } - + fpu_free(&tsk->thread.fpu); WARN(tsk->thread.ds_ctx, "leaking DS context\n"); } diff --git 
a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index f6c62667e30c..0a7a4f5bbaa9 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -317,7 +317,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* we're going to use this soon, after a few expensive things */ if (preload_fpu) - prefetch(next->xstate); + prefetch(next->fpu.state); /* * Reload esp0. diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 17cb3295cbf7..979215f51985 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -396,7 +396,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* we're going to use this soon, after a few expensive things */ if (preload_fpu) - prefetch(next->xstate); + prefetch(next->fpu.state); /* * Reload esp0, LDT and the page table pointer: diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index c1b0a11033a2..37e68fc5e24a 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -109,7 +109,7 @@ int save_i387_xstate(void __user *buf) task_thread_info(tsk)->status &= ~TS_USEDFPU; stts(); } else { - if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, + if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, xstate_size)) return -1; } diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c index aa0987088774..62797f930511 100644 --- a/arch/x86/math-emu/fpu_aux.c +++ b/arch/x86/math-emu/fpu_aux.c @@ -30,10 +30,10 @@ static void fclex(void) } /* Needs to be externally visible */ -void finit_task(struct task_struct *tsk) +void finit_soft_fpu(struct i387_soft_struct *soft) { - struct i387_soft_struct *soft = &tsk->thread.xstate->soft; struct address *oaddr, *iaddr; + memset(soft, 0, sizeof(*soft)); soft->cwd = 0x037f; soft->swd = 0; soft->ftop = 0; /* We don't keep top in the status word internally. */ @@ -52,7 +52,7 @@ void finit_task(struct task_struct *tsk) void finit(void) { - finit_task(current); + finit_soft_fpu(&current->thread.fpu.state->soft); } /* -- cgit v1.2.3-59-g8ed1b From dce8bf4e115aa44d590802ce3554e926840c9042 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 10 May 2010 13:41:41 -0700 Subject: x86, fpu: Use the proper asm constraint in use_xsave() The proper constraint for a receiving 8-bit variable is "=qm", not "=g" which equals "=rim"; even though the "i" will never match, bugs can and do happen due to the difference between "q" and "r". Signed-off-by: H. Peter Anvin Cc: Avi Kivity Cc: Suresh Siddha LKML-Reference: <1273135546-29690-2-git-send-email-avi@redhat.com> --- arch/x86/include/asm/i387.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 1a8cca33b736..8002e9ce25fc 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -64,7 +64,7 @@ static inline bool use_xsave(void) alternative_io("mov $0, %0", "mov $1, %0", X86_FEATURE_XSAVE, - "=g"(has_xsave)); + "=qm" (has_xsave)); return has_xsave; } -- cgit v1.2.3-59-g8ed1b From a3c8acd04376d604370dcb6cd2143c9c14078a50 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 11 May 2010 17:47:07 -0700 Subject: x86: Add new static_cpu_has() function using alternatives For CPU-feature-specific code that touches performance-critical paths, introduce a static patching version of [boot_]cpu_has(). 
This is run at alternatives time and is therefore not appropriate for most initialization code, but on the other hand initialization code is generally not performance critical. On gcc 4.5+ this uses the new "asm goto" feature. Signed-off-by: H. Peter Anvin Cc: Avi Kivity Cc: Suresh Siddha LKML-Reference: <1273135546-29690-2-git-send-email-avi@redhat.com> --- arch/x86/include/asm/cpufeature.h | 57 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0cd82d068613..9b11a5cc6662 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -175,6 +175,7 @@ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#include #include extern const char * const x86_cap_flags[NCAPINTS*32]; @@ -283,6 +284,62 @@ extern const char * const x86_power_flags[32]; #endif /* CONFIG_X86_64 */ +/* + * Static testing of CPU features. Used the same as boot_cpu_has(). + * These are only valid after alternatives have run, but will statically + * patch the target code for additional performance. + * + */ +static __always_inline __pure bool __static_cpu_has(u8 bit) +{ +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + asm goto("1: jmp %l[t_no]\n" + "2:\n" + ".section .altinstructions,\"a\"\n" + _ASM_ALIGN "\n" + _ASM_PTR "1b\n" + _ASM_PTR "0\n" /* no replacement */ + " .byte %P0\n" /* feature bit */ + " .byte 2b - 1b\n" /* source len */ + " .byte 0\n" /* replacement len */ + " .byte 0xff + 0 - (2b-1b)\n" /* padding */ + ".previous\n" + : : "i" (bit) : : t_no); + return true; + t_no: + return false; +#else + u8 flag; + /* Open-coded due to __stringify() in ALTERNATIVE() */ + asm volatile("1: movb $0,%0\n" + "2:\n" + ".section .altinstructions,\"a\"\n" + _ASM_ALIGN "\n" + _ASM_PTR "1b\n" + _ASM_PTR "3f\n" + " .byte %P1\n" /* feature bit */ + " .byte 2b - 1b\n" /* source len */ + " .byte 4f - 3f\n" /* replacement len */ + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "3: movb $1,%0\n" + "4:\n" + ".previous\n" + : "=qm" (flag) : "i" (bit)); + return flag; +#endif +} + +#define static_cpu_has(bit) \ +( \ + __builtin_constant_p(boot_cpu_has(bit)) ? \ + boot_cpu_has(bit) : \ + (__builtin_constant_p(bit) && !((bit) & ~0xff)) ? \ + __static_cpu_has(bit) : \ + boot_cpu_has(bit) \ +) + #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ #endif /* _ASM_X86_CPUFEATURE_H */ -- cgit v1.2.3-59-g8ed1b From c9775b4cc522e5f1b40b1366a993f0f05f600f39 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 11 May 2010 17:49:54 -0700 Subject: x86, fpu: Use static_cpu_has() to implement use_xsave() use_xsave() is now just a special case of static_cpu_has(), so use static_cpu_has(). Signed-off-by: H. 
Peter Anvin Cc: Avi Kivity Cc: Suresh Siddha LKML-Reference: <1273135546-29690-2-git-send-email-avi@redhat.com> --- arch/x86/include/asm/i387.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 8002e9ce25fc..c991b3a7b904 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -57,16 +58,9 @@ extern int restore_i387_xstate_ia32(void __user *buf); #define X87_FSW_ES (1 << 7) /* Exception Summary */ -static inline bool use_xsave(void) +static __always_inline __pure bool use_xsave(void) { - u8 has_xsave; - - alternative_io("mov $0, %0", - "mov $1, %0", - X86_FEATURE_XSAVE, - "=qm" (has_xsave)); - - return has_xsave; + return static_cpu_has(X86_FEATURE_XSAVE); } #ifdef CONFIG_X86_64 -- cgit v1.2.3-59-g8ed1b From 9e565292270a2d55524be38835104c564ac8f795 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Thu, 13 May 2010 21:43:03 -0700 Subject: x86: Use .cfi_sections for assembly code The newer assemblers support the .cfi_sections directive so we can put the CFI from .S files into the .debug_frame section that is preserved in unstripped vmlinux and in separate debuginfo, rather than the .eh_frame section that is now discarded by vmlinux.lds.S. Signed-off-by: Roland McGrath LKML-Reference: <20100514044303.A6FE7400BE@magilla.sf.frob.com> Signed-off-by: H. Peter Anvin --- arch/x86/Makefile | 5 +++-- arch/x86/include/asm/dwarf2.h | 12 ++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 0a43dc515e4c..8aa1b59b9074 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1) # is .cfi_signal_frame supported too? cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) +cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) +KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) +KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index ae6253ab9029..733f7e91e7a9 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -34,6 +34,18 @@ #define CFI_SIGNAL_FRAME #endif +#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__) + /* + * Emit CFI data in .debug_frame sections, not .eh_frame sections. + * The latter we currently just discard since we don't do DWARF + * unwinding at runtime. So only the offline DWARF information is + * useful to anyone. Note we should not use this directive if this + * file is used in the vDSO assembly, or if vmlinux.lds.S gets + * changed so it doesn't discard .eh_frame. + */ + .cfi_sections .debug_frame +#endif + #else /* -- cgit v1.2.3-59-g8ed1b From ade029e2aaacc8965a548b0b0f80c5bee97ffc68 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 24 Apr 2010 09:56:53 +0200 Subject: x86, k8: Fix build error when K8_NB is disabled K8_NB depends on PCI and when the last is disabled (allnoconfig) we fail at the final linking stage due to missing exported num_k8_northbridges. Add a header stub for that. 
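The header change below is the usual stub pattern for optional subsystems: when the Kconfig option is on, the header exports the real symbol; when it is off, it supplies a compile-time placeholder so callers still build and nothing references the missing exported symbol at link time. A generic sketch of the pattern; CONFIG_FOO_NB and every identifier in it are invented for illustration and are not the actual k8.h contents.

/* foo_nb.h -- generic sketch of the optional-subsystem header-stub pattern */
struct pci_dev;			/* a forward declaration is enough here */

#ifdef CONFIG_FOO_NB
extern int num_foo_northbridges;
extern struct pci_dev *foo_northbridges[];

static inline struct pci_dev *node_to_foo_nb(int node)
{
	return node < num_foo_northbridges ? foo_northbridges[node] : NULL;
}
#else
/*
 * Option disabled: callers still compile, the count is a literal 0, and
 * nothing references the (non-existent) exported array at link time.
 */
#define num_foo_northbridges 0

static inline struct pci_dev *node_to_foo_nb(int node)
{
	return NULL;
}
#endif
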
Signed-off-by: Borislav Petkov LKML-Reference: <20100503183036.GJ26107@aftab> Signed-off-by: H. Peter Anvin Cc: --- arch/x86/include/asm/k8.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h index f70e60071fe8..af00bd1d2089 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/k8.h @@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); extern int k8_scan_nodes(void); #ifdef CONFIG_K8_NB +extern int num_k8_northbridges; + static inline struct pci_dev *node_to_k8_nb_misc(int node) { return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; } + #else +#define num_k8_northbridges 0 + static inline struct pci_dev *node_to_k8_nb_misc(int node) { return NULL; -- cgit v1.2.3-59-g8ed1b From f3d46f9d3194e0329216002a8724d4c0957abc79 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 17 May 2010 14:33:53 +1000 Subject: atomic_t: Cast to volatile when accessing atomic variables In preparation for removing volatile from the atomic_t definition, this patch adds a volatile cast to all the atomic read functions. Signed-off-by: Anton Blanchard Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/atomic.h | 4 ++-- arch/arm/include/asm/atomic.h | 2 +- arch/avr32/include/asm/atomic.h | 2 +- arch/cris/include/asm/atomic.h | 2 +- arch/frv/include/asm/atomic.h | 2 +- arch/h8300/include/asm/atomic.h | 2 +- arch/ia64/include/asm/atomic.h | 4 ++-- arch/m32r/include/asm/atomic.h | 2 +- arch/m68k/include/asm/atomic_mm.h | 2 +- arch/m68k/include/asm/atomic_no.h | 2 +- arch/mips/include/asm/atomic.h | 4 ++-- arch/mn10300/include/asm/atomic.h | 2 +- arch/parisc/include/asm/atomic.h | 4 ++-- arch/sh/include/asm/atomic.h | 2 +- arch/sparc/include/asm/atomic_32.h | 2 +- arch/sparc/include/asm/atomic_64.h | 4 ++-- arch/x86/include/asm/atomic.h | 2 +- arch/x86/include/asm/atomic64_64.h | 2 +- arch/xtensa/include/asm/atomic.h | 2 +- include/asm-generic/atomic.h | 2 +- 20 files changed, 25 insertions(+), 25 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 610dff44d94b..e756d04b6cd5 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -17,8 +17,8 @@ #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) -#define atomic_read(v) ((v)->counter + 0) -#define atomic64_read(v) ((v)->counter + 0) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) #define atomic64_set(v,i) ((v)->counter = (i)) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index e8ddec2cb158..a0162fa94564 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -24,7 +24,7 @@ * strex/ldrex monitor on some implementations. The reason we can use it for * atomic_set() is the clrex or dummy strex done on every exception return. 
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #if __LINUX_ARM_ARCH__ >= 6 diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index b131c27ddf57..bbce6a1c6bb6 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -19,7 +19,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) /* diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index a6aca819e9f3..88dc9b9c4ba0 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -15,7 +15,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) /* These should be written in asm but we do it in C for now. */ diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 00a57af79afc..fae32c7fdcb6 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -36,7 +36,7 @@ #define smp_mb__after_atomic_inc() barrier() #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = (i)) #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 33c8c0fa9583..e936804b7508 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -10,7 +10,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) #include diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 88405cb0832a..4e1948447a00 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -21,8 +21,8 @@ #define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) -#define atomic_read(v) ((v)->counter) -#define atomic64_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i)) diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 63f0cf0f50dd..d44a51e5271b 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -26,7 +26,7 @@ * * Atomically reads the value of @v. 
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index d9d2ed647435..6a223b3f7e74 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h @@ -15,7 +15,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) static inline void atomic_add(int i, atomic_t *v) diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h index 5674cb9449bd..289310c63a8a 100644 --- a/arch/m68k/include/asm/atomic_no.h +++ b/arch/m68k/include/asm/atomic_no.h @@ -15,7 +15,7 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) static __inline__ void atomic_add(int i, atomic_t *v) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 519197ede089..59dc0c7ef733 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -29,7 +29,7 @@ * * Atomically reads the value of @v. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /* * atomic_set - set atomic variable @@ -410,7 +410,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) * @v: pointer of type atomic64_t * */ -#define atomic64_read(v) ((v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) /* * atomic64_set - set atomic variable diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 5bf5be9566de..e41222d6c2fd 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -31,7 +31,7 @@ * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. 
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 716634d1f546..f81955934aeb 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -189,7 +189,7 @@ static __inline__ void atomic_set(atomic_t *v, int i) static __inline__ int atomic_read(const atomic_t *v) { - return v->counter; + return (*(volatile int *)&(v)->counter); } /* exported interface */ @@ -286,7 +286,7 @@ atomic64_set(atomic64_t *v, s64 i) static __inline__ s64 atomic64_read(const atomic64_t *v) { - return v->counter; + return (*(volatile long *)&(v)->counter); } #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 275a448ae8c2..c7983124d99d 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -13,7 +13,7 @@ #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) #if defined(CONFIG_GUSA_RB) diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index f0d343c3b956..7ae128b19d3f 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -25,7 +25,7 @@ extern int atomic_cmpxchg(atomic_t *, int, int); extern int atomic_add_unless(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index f2e48009989e..2050ca02c423 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -13,8 +13,8 @@ #define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } -#define atomic_read(v) ((v)->counter) -#define atomic64_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) #define atomic64_set(v, i) (((v)->counter) = i) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 8f8217b9bdac..37b39d27abe0 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -22,7 +22,7 @@ */ static inline int atomic_read(const atomic_t *v) { - return v->counter; + return (*(volatile int *)&(v)->counter); } /** diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 51c5b4056929..b014e235ea8d 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -18,7 +18,7 @@ */ static inline long atomic64_read(const atomic64_t *v) { - return v->counter; + return (*(volatile long *)&(v)->counter); } /** diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 22d6dde42619..a96a0619d0b7 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -46,7 +46,7 @@ * * Atomically reads the value of @v. 
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index c99c64dc5f3d..c33749f95b32 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -33,7 +33,7 @@ * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) /** * atomic_set - set atomic variable -- cgit v1.2.3-59-g8ed1b From c59bd5688299cddb71183e156e7a3c1409b90df2 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 17 May 2010 15:13:23 -0700 Subject: x86, hweight: Use a 32-bit popcnt for __arch_hweight32() Use a 32-bit popcnt instruction for __arch_hweight32(), even on x86-64. Even though the input register will *usually* be zero-extended due to the standard operation of the hardware, it isn't necessarily so if the input value was the result of truncating a 64-bit operation. Note: the POPCNT32 variant used on x86-64 has a technically unnecessary REX prefix to make it five bytes long, the same as a CALL instruction, therefore avoiding an unnecessary NOP. Reported-by: Linus Torvalds Signed-off-by: H. Peter Anvin Cc: Borislav Petkov LKML-Reference: --- arch/x86/include/asm/arch_hweight.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index d1fc3c219ae6..9686c3d9ff73 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -2,13 +2,15 @@ #define _ASM_X86_HWEIGHT_H #ifdef CONFIG_64BIT +/* popcnt %edi, %eax -- redundant REX prefix for alignment */ +#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7" /* popcnt %rdi, %rax */ -#define POPCNT ".byte 0xf3,0x48,0x0f,0xb8,0xc7" +#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" #define REG_IN "D" #define REG_OUT "a" #else /* popcnt %eax, %eax */ -#define POPCNT ".byte 0xf3,0x0f,0xb8,0xc0" +#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0" #define REG_IN "a" #define REG_OUT "a" #endif @@ -23,7 +25,7 @@ static inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res = 0; - asm (ALTERNATIVE("call __sw_hweight32", POPCNT, X86_FEATURE_POPCNT) + asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); @@ -48,7 +50,7 @@ static inline unsigned long __arch_hweight64(__u64 w) return __arch_hweight32((u32)w) + __arch_hweight32((u32)(w >> 32)); #else - asm (ALTERNATIVE("call __sw_hweight64", POPCNT, X86_FEATURE_POPCNT) + asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); #endif /* CONFIG_X86_32 */ -- cgit v1.2.3-59-g8ed1b
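To make the hweight alternative above concrete, here is a rough user-space sketch of the two paths it selects between. sw_hweight32() mirrors the classic parallel bit-count that a software fallback such as __sw_hweight32 typically performs, and hw_hweight32() reaches the popcnt instruction through the compiler builtin; the builtin route and both names are illustrative stand-ins, not the kernel's patched-in opcode.

/*
 * Sketch of the two population-count paths: a portable parallel-sum
 * reduction and the hardware instruction via __builtin_popcount()
 * (which emits popcnt when built with -mpopcnt or an -march implying it).
 */
#include <stdint.h>

static unsigned int sw_hweight32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;				/* 2-bit sums */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;			/* per-byte sums */
	return (w * 0x01010101) >> 24;				/* add the four bytes */
}

static unsigned int hw_hweight32(uint32_t w)
{
	return __builtin_popcount(w);
}

As the commit message notes, the extra REX prefix on the 64-bit kernel's POPCNT32 encoding exists only to pad the instruction to five bytes, the same length as the "call __sw_hweight32" it replaces, so the alternatives patching needs no filler NOP.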