author | Anton Blanchard <anton@samba.org> | 2013-09-23 12:04:39 +1000
---|---|---
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2013-10-11 16:48:28 +1100
commit | 926f160f460170b0361f600f365369685ad74009 | (patch)
tree | 04537aba608b597cbf3af3d8a774b4c09bd52924 | /arch/powerpc/include/asm/ppc_asm.h
parent | powerpc: PTRACE_PEEKUSR/PTRACE_POKEUSER of FPR registers in little endian builds | (diff)
powerpc: Little endian builds double word swap VSX state during context save/restore
The elements within VSX loads and stores are big endian ordered
regardless of the endianness of the CPU. Our VSX context save/restore
code uses lxvd2x and stxvd2x, which are 2x doubleword operations. On
little endian builds this means the two doublewords end up swapped,
and we have to perform another swap to undo it.

We need to do this on both save and restore.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
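
As a rough userspace sketch of the effect (not kernel code — vsr_t, xxswapd() and stxvd2x_le() below are hypothetical models of a VSX register and the instructions involved), the swap-before-store used on the save path looks like this:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of a VSX register: two 64-bit doublewords. */
typedef struct { uint64_t dw[2]; } vsr_t;

/* Model of xxswapd: exchange the two doublewords of a register. */
static vsr_t xxswapd(vsr_t v)
{
	vsr_t r = { { v.dw[1], v.dw[0] } };
	return r;
}

/* Model of stxvd2x on a little endian CPU: the instruction's element
 * order is fixed big endian, so the doublewords land in memory swapped
 * relative to what the kernel's thread struct layout expects. */
static void stxvd2x_le(vsr_t v, vsr_t *mem)
{
	mem->dw[0] = v.dw[1];
	mem->dw[1] = v.dw[0];
}

int main(void)
{
	vsr_t reg = { { 0x0000000000000000ULL, 0x1111111111111111ULL } };
	vsr_t naive, rotated;

	/* Naive save: the two doublewords are swapped in the saved image. */
	stxvd2x_le(reg, &naive);

	/* STXVD2X_ROT equivalent: swap the doublewords before the store so
	 * the saved image is correct. (The kernel macro also swaps the
	 * register back afterwards; pass-by-value hides that step here.) */
	stxvd2x_le(xxswapd(reg), &rotated);

	printf("naive:   %016llx %016llx\n",
	       (unsigned long long)naive.dw[0],
	       (unsigned long long)naive.dw[1]);
	printf("rotated: %016llx %016llx\n",
	       (unsigned long long)rotated.dw[0],
	       (unsigned long long)rotated.dw[1]);
	return 0;
}
```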
Diffstat (limited to 'arch/powerpc/include/asm/ppc_asm.h')
-rw-r--r-- | arch/powerpc/include/asm/ppc_asm.h | 21
1 file changed, 17 insertions(+), 4 deletions(-)
```diff
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 599545738af3..0c51fb4fd2aa 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -180,9 +180,20 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_32VRS_TRANSACT(n,b,base)	REST_16VRS_TRANSACT(n,b,base);	\
 					REST_16VRS_TRANSACT(n+16,b,base)
 
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
+					STXVD2X(n,b,base);	\
+					XXSWAPD(n,n)
+
+#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
+					XXSWAPD(n,n)
+#endif
 /* Save the lower 32 VSRs in the thread VSR region */
 #define SAVE_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n));	\
-					STXVD2X(n,R##base,R##b)
+					STXVD2X_ROT(n,R##base,R##b)
 #define SAVE_2VSRS_TRANSACT(n,b,base)	SAVE_VSR_TRANSACT(n,b,base);	\
 					SAVE_VSR_TRANSACT(n+1,b,base)
 #define SAVE_4VSRS_TRANSACT(n,b,base)	SAVE_2VSRS_TRANSACT(n,b,base);	\
@@ -195,7 +206,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 					SAVE_16VSRS_TRANSACT(n+16,b,base)
 
 #define REST_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n));	\
-					LXVD2X(n,R##base,R##b)
+					LXVD2X_ROT(n,R##base,R##b)
 #define REST_2VSRS_TRANSACT(n,b,base)	REST_VSR_TRANSACT(n,b,base);	\
 					REST_VSR_TRANSACT(n+1,b,base)
 #define REST_4VSRS_TRANSACT(n,b,base)	REST_2VSRS_TRANSACT(n,b,base);	\
@@ -208,13 +219,15 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 					REST_16VSRS_TRANSACT(n+16,b,base)
 
 /* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n));		\
+				STXVD2X_ROT(n,R##base,R##b)
 #define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
 #define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
 #define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
 #define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
 #define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n));		\
+				LXVD2X_ROT(n,R##base,R##b)
 #define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
 #define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
 #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
```
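For the restore direction, a companion sketch under the same hypothetical model (lxvd2x_rot_le() is invented here for illustration; the kernel's LXVD2X_ROT is the assembler macro added above). Only one swap is needed, because the destination is the register itself:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical VSX register model, as in the earlier sketch. */
typedef struct { uint64_t dw[2]; } vsr_t;

/* Model of LXVD2X_ROT on little endian: lxvd2x delivers the two saved
 * doublewords swapped, and a single trailing xxswapd puts them back. */
static vsr_t lxvd2x_rot_le(const vsr_t *mem)
{
	vsr_t loaded = { { mem->dw[1], mem->dw[0] } };     /* lxvd2x */
	vsr_t fixed  = { { loaded.dw[1], loaded.dw[0] } }; /* xxswapd */
	return fixed;
}

int main(void)
{
	/* A correctly saved image, as produced by STXVD2X_ROT. */
	vsr_t saved = { { 0xaaaaULL, 0xbbbbULL } };
	vsr_t reg = lxvd2x_rot_le(&saved);

	printf("%llx %llx\n", (unsigned long long)reg.dw[0],
	       (unsigned long long)reg.dw[1]);  /* prints: aaaa bbbb */
	return 0;
}
```

Note the asymmetry in the macros: the store side brackets stxvd2x with two XXSWAPDs, presumably so the live register still holds its original value after the save, while the load side gets away with a single XXSWAPD since the register is being overwritten anyway.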