author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2005-09-27 21:45:45 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-09-28 07:46:42 -0700
commit    0f9578b70a9f112bfb541e1d5ab486a376e64503 (patch)
tree      00e93df9f9920c43ace34e28298255dc8a0f9263
parent    [PATCH] device-mapper: Fix queue_if_no_path initialisation (diff)
[PATCH] ppc64: More hugepage fixes
My previous patch fixing invalidation of huge PTEs wasn't good enough; we still had an issue if a PTE invalidation batch contained both small and large pages. This patch fixes that by making sure the batch is flushed whenever the page size fed to it changes.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
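For illustration, here is a minimal, self-contained sketch of the invariant this patch enforces. This is not the kernel code: the names (struct tlb_batch, batch_add, flush_batch, BATCH_NR) are hypothetical stand-ins for ppc64_tlb_batch, hpte_update, flush_tlb_pending and PPC64_TLB_BATCH_NR. The point is the same: a pending invalidation batch may only hold entries of one page size, so it must be flushed whenever the incoming PTE's size differs from what the batch already contains.

#include <stdbool.h>
#include <stddef.h>

#define BATCH_NR 64

struct tlb_batch {
	unsigned long addr[BATCH_NR];
	size_t index;		/* number of queued invalidations */
	bool large;		/* page size shared by every queued entry */
};

void flush_batch(struct tlb_batch *batch);	/* stands in for flush_tlb_pending() */

static void batch_add(struct tlb_batch *batch, unsigned long addr, bool large)
{
	/*
	 * Mixing page sizes would make the eventual invalidation use the
	 * wrong size for some entries, so flush the partial batch first.
	 */
	if (batch->index != 0 && batch->large != large) {
		flush_batch(batch);
		batch->index = 0;
	}
	if (batch->index == 0)
		batch->large = large;	/* the batch adopts this entry's size */
	batch->addr[batch->index++] = addr;
	if (batch->index == BATCH_NR) {
		flush_batch(batch);
		batch->index = 0;
	}
}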
 arch/ppc64/mm/hash_native.c  | 5 ++---
 arch/ppc64/mm/tlb.c          | 4 +++-
 include/asm-ppc64/tlbflush.h | 1 +
 3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index eb1bbb5b6c16..bfd385b7713c 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -343,7 +343,7 @@ static void native_flush_hash_range(unsigned long context,
 	hpte_t *hptep;
 	unsigned long hpte_v;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-	unsigned long large;
+	unsigned long large = batch->large;
 	local_irq_save(flags);
@@ -356,7 +356,6 @@ static void native_flush_hash_range(unsigned long context,
 		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
 		batch->vaddr[j] = va;
-		large = pte_huge(batch->pte[i]);
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
@@ -406,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
asm volatile("ptesync":::"memory");
for (i = 0; i < j; i++)
- __tlbie(batch->vaddr[i], 0);
+ __tlbie(batch->vaddr[i], large);
asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0..21fbffb23a43 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
 	 */
-	if (unlikely(i != 0 && context != batch->context)) {
+	if (i != 0 && (context != batch->context ||
+		       batch->large != pte_huge(pte))) {
 		flush_tlb_pending();
 		i = 0;
 	}
@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	if (i == 0) {
 		batch->context = context;
 		batch->mm = mm;
+		batch->large = pte_huge(pte);
 	}
 	batch->pte[i] = __pte(pte);
 	batch->addr[i] = addr;
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 45411a67e082..74271d7c1d16 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -25,6 +25,7 @@ struct ppc64_tlb_batch {
 	pte_t pte[PPC64_TLB_BATCH_NR];
 	unsigned long addr[PPC64_TLB_BATCH_NR];
 	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int large;
 };

 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
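With the whole batch guaranteed to share one page size, the flush side can pass a single size flag to every invalidation, which is what the hash_native.c hunk above does by passing batch->large to __tlbie. A hedged sketch of that loop, continuing the hypothetical names from the earlier example (tlbie_one stands in for __tlbie; neither name nor signature is the kernel's):

void tlbie_one(unsigned long addr, bool large);	/* stands in for __tlbie() */

void flush_batch(struct tlb_batch *batch)
{
	size_t i;

	/* One size flag, taken from the batch, applies to every entry. */
	for (i = 0; i < batch->index; i++)
		tlbie_one(batch->addr[i], batch->large);
}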