author    Nicholas Piggin <npiggin@gmail.com>  2018-09-15 01:30:47 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2018-09-19 21:59:41 +1000
commit    8b92887ced2e3fce223412487f99d4ef3f07b490 (patch)
tree      71d8134ce8272b810f9a847834656e5b09116fc1 /arch/powerpc/mm/slb.c
parent    powerpc/64s/hash: avoid the POWER5 < DD2.1 slb invalidate workaround on POWER8/9 (diff)
powerpc/64s/hash: move POWER5 < DD2.1 slbie workaround where it is needed
The POWER5 < DD2.1 issue is that slbie needs to be issued more than once. It came in with this change:

ChangeSet@1.1608, 2004-04-29 07:12:31-07:00, david@gibson.dropbear.id.au
  [PATCH] POWER5 erratum workaround

  Early POWER5 revisions (<DD2.1) have a problem requiring slbie
  instructions to be repeated under some circumstances. The patch below
  adds a workaround (patch made by Anton Blanchard).

(aka. 3e4520f7605243abf66a7ccd3d2e49e48e8c0483 in the full history tree)

The extra slbie in switch_slb is done even for the case where slbia is called (slb_flush_and_rebolt). I don't believe that is required, because there are other slb_flush_and_rebolt callers which do not issue the workaround slbie, and they would be broken if it was required.

It also seems to be fine inside the isync with the first slbie, as it is in the kernel stack switch code.

So move this workaround to where it is required. This is not much of an optimisation, because this is not the fast path, but it makes the code more understandable and neater.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Retain slbie_data initialisation to avoid compiler warning]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
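For orientation, the flush path in switch_slb after this patch has the following shape. This is a sketch consolidated from the hunks below, not the verbatim file contents; the loop body that builds slbie_data from each slb_cache entry is abbreviated to a comment.

	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		unsigned long slbie_data = 0;	/* now local to the slbie branch */
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			/* ... build slbie_data (EA, segment size) for slb_cache[i] ... */
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}

		/*
		 * Workaround POWER5 < DD2.1 issue: repeat the slbie, now only
		 * on affected CPUs, only when a single entry was invalidated,
		 * and inside the same isync section as the first slbie.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
			asm volatile("slbie %0" : : "r" (slbie_data));

		asm volatile("isync" : : : "memory");
	} else {
		/* slbia-based full flush; no workaround slbie is needed here */
		__slb_flush_and_rebolt();
	}

	get_paca()->slb_cache_ptr = 0;

Keeping the workaround slbie between the isync pair mirrors the kernel stack switch code, which the commit message cites as evidence that this placement is safe.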
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0656d20d59ec..a9c66feb3c43 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -296,7 +296,6 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long offset;
-	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long exec_base;
@@ -311,7 +310,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	offset = get_paca()->slb_cache_ptr;
 	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
+		unsigned long slbie_data = 0;
 		int i;
+
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
 			slbie_data = (unsigned long)get_paca()->slb_cache[i]
@@ -321,15 +322,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 			slbie_data |= SLBIE_C; /* C set for user addresses */
 			asm volatile("slbie %0" : : "r" (slbie_data));
 		}
-		asm volatile("isync" : : : "memory");
-	} else {
-		__slb_flush_and_rebolt();
-	}
 
-	if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
 		/* Workaround POWER5 < DD2.1 issue */
-		if (offset == 1 || offset > SLB_CACHE_ENTRIES)
+		if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
 			asm volatile("slbie %0" : : "r" (slbie_data));
+
+		asm volatile("isync" : : : "memory");
+	} else {
+		__slb_flush_and_rebolt();
 	}
 
 	get_paca()->slb_cache_ptr = 0;