author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-04-03 13:54:59 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-04-03 14:30:55 +0200
commit     02a8f3abb708919149cb657a5202f4603f0c38e2 (patch)
tree       2bf430c528af833f0544f575512f7e49faf81f57 /arch/s390/include/asm/tlb.h
parent     s390/irq: Use defines for external interruption codes (diff)
s390/mm,tlb: safeguard against speculative TLB creation
The z/Architecture Principles of Operation states that the CPU is allowed to create TLB entries for an address space at any time while an ASCE is loaded in a control register. This is true even if the CPU is running in the kernel and the user address space is not (actively) accessed.

In theory this can affect two aspects of the TLB flush logic. For full-mm flushes the ASCE of the dying process is still attached. The approach of flushing first with IDTE and then simply freeing all page tables can, in theory, leave stale TLB entries behind. Use the batched free of page tables for the full-mm flushes as well.

For operations that can run with a stale ASCE in the control register, e.g. a delayed update_user_asce in switch_mm, load the kernel ASCE to prevent invalid TLB entries from being created.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
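To make the second safeguard concrete, here is a minimal user-space sketch, not the actual s390 kernel code: load_asce() merely models writing an ASCE to a control register (lctlg on s390), and every identifier except update_user_asce/switch_mm (which the message names) is a hypothetical stand-in. The rule it illustrates: before any window in which the loaded user ASCE might be stale, install the kernel ASCE so the CPU cannot speculatively create TLB entries for the old address space.

/*
 * Illustrative stand-in, not kernel code: models the "load the kernel
 * ASCE while the user ASCE may be stale" safeguard described above.
 * All identifiers here are hypothetical.
 */
#include <stdio.h>

#define KERNEL_ASCE 0x1000UL		/* pretend kernel-space ASCE */

static unsigned long control_reg;	/* models CR1/CR7 contents */

static void load_asce(unsigned long asce)
{
	control_reg = asce;		/* real code: lctlg instruction */
}

/*
 * While any user ASCE sits in the control register, the CPU may create
 * TLB entries for that address space at any time.  So before entering a
 * window in which the loaded user ASCE could be stale (e.g. a delayed
 * update_user_asce in switch_mm), switch to the kernel ASCE.
 */
static void begin_stale_asce_window(void)
{
	load_asce(KERNEL_ASCE);		/* the safeguard */
}

static void finish_switch_mm(unsigned long next_user_asce)
{
	load_asce(next_user_asce);	/* install the correct user ASCE */
}

int main(void)
{
	begin_stale_asce_window();
	printf("during window: %#lx (kernel ASCE)\n", control_reg);
	finish_switch_mm(0x2000UL);
	printf("after switch:  %#lx (user ASCE)\n", control_reg);
	return 0;
}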
Diffstat (limited to 'arch/s390/include/asm/tlb.h')
-rw-r--r--  arch/s390/include/asm/tlb.h | 14 +++-----------
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 2cb846c4b37f..c544b6f05d95 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->end = end;
 	tlb->fullmm = !(start | (end+1));
 	tlb->batch = NULL;
-	if (tlb->fullmm)
-		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	if (!tlb->fullmm)
-		return page_table_free_rcu(tlb, (unsigned long *) pte);
-	page_table_free(tlb->mm, (unsigned long *) pte);
+	page_table_free_rcu(tlb, (unsigned long *) pte);
 }
 
 /*
@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pmd);
-	crst_table_free(tlb->mm, (unsigned long *) pmd);
+	tlb_remove_table(tlb, pmd);
 #endif
 }
 
@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-	if (!tlb->fullmm)
-		return tlb_remove_table(tlb, pud);
-	crst_table_free(tlb->mm, (unsigned long *) pud);
+	tlb_remove_table(tlb, pud);
 #endif
 }
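The common thread in all four hunks is that page tables are now always released through the batched paths (page_table_free_rcu, tlb_remove_table) rather than freed directly, because a CPU may still create TLB entries from them while the ASCE remains attached. A minimal user-space analogy follows; it is not the kernel mechanism (the real code defers the free via the mmu_gather batch and RCU, not a thread join), and every name in it is hypothetical. It uses a pthread as the "speculating CPU" to show why the table may only be freed once no walker can still reach it.

/*
 * User-space analogy, not kernel code: a concurrent walker (standing in
 * for a CPU that may speculatively create TLB entries) can still read a
 * page table after it has been "flushed", so the table may only be
 * freed once the walker is known to be finished -- the role the batched
 * RCU free plays in this patch.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

#define PTRS_PER_PTE 256		/* arbitrary table size for the sketch */

static unsigned long *pte_table;	/* stands in for a page table */

static void *speculating_cpu(void *arg)
{
	(void)arg;
	/* May walk the table at any time while the "ASCE" is attached. */
	unsigned long sum = 0;
	for (int i = 0; i < PTRS_PER_PTE; i++)
		sum += pte_table[i];
	printf("walker read the table, sum=%lu\n", sum);
	return NULL;
}

int main(void)
{
	pthread_t cpu;

	pte_table = calloc(PTRS_PER_PTE, sizeof(*pte_table));
	if (!pte_table || pthread_create(&cpu, NULL, speculating_cpu, NULL))
		return 1;

	/*
	 * What the old fullmm path risked (do NOT do this): freeing the
	 * table here, while the walker may still dereference it, is a
	 * use-after-free -- the analogue of a stale TLB entry pointing
	 * into a freed page table.
	 *
	 * The safe order, mirroring the batched free: wait until every
	 * "CPU" is guaranteed to be done with the table, then free it.
	 */
	pthread_join(cpu, NULL);	/* stand-in for the RCU grace period */
	free(pte_table);		/* now safe */
	puts("table freed only after the walker finished");
	return 0;
}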