author	Anton Blanchard <anton@samba.org>	2013-09-23 12:04:50 +1000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-10-11 16:48:38 +1100
commit	52055d07ce0a465949552c9c5d65d2b8b37672c5 (patch)
tree	7421607319a385d64810626693993853a16cc54a /arch/powerpc/kernel/align.c
parent	powerpc: Add little endian support to alignment handler (diff)
powerpc: Handle VSX alignment faults in little endian mode
Things are complicated by the fact that VSX elements are big endian
ordered even in little endian mode. 8 byte loads and stores also
write to the top 8 bytes of the register.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
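As an illustration of the element ordering issue, here is a standalone
userspace sketch (plain C, not kernel code; the names mem and reg_image
are invented for this example). It mirrors the reverse walk the patch
adds for little endian: start at the last element (addr += length -
elsize) and step backwards (addr -= elsize). The per-byte i^sw swap the
real code does for the SW case is omitted for brevity:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char mem[16];		/* stand-in for the unaligned source */
		unsigned char reg_image[16];	/* stand-in for the 16 byte VSX register */
		unsigned int length = 16, elsize = 4, j;
		unsigned char *addr = mem, *ptr = reg_image;

		for (j = 0; j < 16; j++)
			mem[j] = j;

		/* Elements are BE ordered within the register even in LE
		 * mode, so copy them in reverse order, as the patched
		 * emulate_vsx() does on little endian. */
		addr += length - elsize;
		for (j = 0; j < length; j += elsize) {
			memcpy(ptr, addr, elsize);
			ptr += elsize;
			addr -= elsize;
		}

		/* Prints: 0c 0d 0e 0f 08 09 0a 0b 04 05 06 07 00 01 02 03 */
		for (j = 0; j < 16; j++)
			printf("%02x ", reg_image[j]);
		printf("\n");
		return 0;
	}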
Diffstat (limited to 'arch/powerpc/kernel/align.c')
 arch/powerpc/kernel/align.c | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cce82b1e0374..59f70adcbcd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -630,7 +630,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 }
 #endif /* CONFIG_SPE */
 
-#if defined(CONFIG_VSX) && defined(__BIG_ENDIAN__)
+#ifdef CONFIG_VSX
 /*
  * Emulate VSX instructions...
  */
@@ -658,8 +658,25 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 
 	lptr = (unsigned long *) ptr;
 
+#ifdef __LITTLE_ENDIAN__
+	if (flags & SW) {
+		elsize = length;
+		sw = length-1;
+	} else {
+		/*
+		 * The elements are BE ordered, even in LE mode, so process
+		 * them in reverse order.
+		 */
+		addr += length - elsize;
+
+		/* 8 byte memory accesses go in the top 8 bytes of the VR */
+		if (length == 8)
+			ptr += 8;
+	}
+#else
 	if (flags & SW)
 		sw = elsize-1;
+#endif
 
 	for (j = 0; j < length; j += elsize) {
 		for (i = 0; i < elsize; ++i) {
@@ -669,19 +686,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 			ret |= __get_user(ptr[i^sw], addr + i);
 		}
 		ptr  += elsize;
+#ifdef __LITTLE_ENDIAN__
+		addr -= elsize;
+#else
 		addr += elsize;
+#endif
 	}
 
+#ifdef __BIG_ENDIAN__
+#define VSX_HI	0
+#define VSX_LO	1
+#else
+#define VSX_HI	1
+#define VSX_LO	0
+#endif
+
 	if (!ret) {
 		if (flags & U)
 			regs->gpr[areg] = regs->dar;
 
 		/* Splat load copies the same data to top and bottom 8 bytes */
 		if (flags & SPLT)
-			lptr[1] = lptr[0];
-		/* For 8 byte loads, zero the top 8 bytes */
+			lptr[VSX_LO] = lptr[VSX_HI];
+		/* For 8 byte loads, zero the low 8 bytes */
 		else if (!(flags & ST) && (8 == length))
-			lptr[1] = 0;
+			lptr[VSX_LO] = 0;
 	} else
 		return -EFAULT;
@@ -805,7 +834,6 @@ int fix_alignment(struct pt_regs *regs)
 	/* DAR has the operand effective address */
 	addr = (unsigned char __user *)regs->dar;
 
-#ifdef __BIG_ENDIAN__
 #ifdef CONFIG_VSX
 	if ((instruction & 0xfc00003e) == 0x7c000018) {
 		unsigned int elsize;
@@ -840,9 +868,6 @@ int fix_alignment(struct pt_regs *regs)
 		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
 	}
 #endif
-#else
-	return -EFAULT;
-#endif
 
 	/* A size of 0 indicates an instruction we don't support, with
 	 * the exception of DCBZ which is handled as a special case here
 	 */
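The doubleword selection can also be exercised outside the kernel with a
small sketch (this assumes an LP64 host where unsigned long is 8 bytes,
and uses the same __BIG_ENDIAN__ compiler predefine the patch tests; the
vr array is invented for the example). Index 0 of the two-element view
is the high doubleword on big endian but the low one on little endian,
which is why the splat and zero fixups above index through VSX_HI and
VSX_LO instead of hardcoding 0 and 1:

	#include <stdio.h>

	/* Same selection the patch adds: which unsigned long index is
	 * the high doubleword of the 16 byte register image depends on
	 * the host byte order. */
	#ifdef __BIG_ENDIAN__
	#define VSX_HI	0
	#define VSX_LO	1
	#else
	#define VSX_HI	1
	#define VSX_LO	0
	#endif

	int main(void)
	{
		/* vr stands in for the 16 byte VSX register image; each
		 * unsigned long is one 8 byte doubleword (LP64 assumed). */
		unsigned long vr[2] = { 0, 0 };

		/* Splat load: the same data goes to both halves. */
		vr[VSX_HI] = 0x1122334455667788UL;
		vr[VSX_LO] = vr[VSX_HI];
		printf("splat:       hi=%#lx lo=%#lx\n", vr[VSX_HI], vr[VSX_LO]);

		/* 8 byte load: data sits in the high half, low half is zeroed. */
		vr[VSX_LO] = 0;
		printf("8 byte load: hi=%#lx lo=%#lx\n", vr[VSX_HI], vr[VSX_LO]);
		return 0;
	}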