author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-04-03 13:54:59 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-04-03 14:30:55 +0200
commit     02a8f3abb708919149cb657a5202f4603f0c38e2 (patch)
tree       2bf430c528af833f0544f575512f7e49faf81f57 /arch/s390/include/asm/mmu_context.h
parent     s390/irq: Use defines for external interruption codes (diff)
download   linux-dev-02a8f3abb708919149cb657a5202f4603f0c38e2.tar.xz
           linux-dev-02a8f3abb708919149cb657a5202f4603f0c38e2.zip
s390/mm,tlb: safeguard against speculative TLB creation
The principles of operation states that the CPU is allowed to create TLB
entries for an address space anytime while an ASCE is loaded to the
control register. This is true even if the CPU is running in the kernel
and the user address space is not (actively) accessed.

In theory this can affect two aspects of the TLB flush logic. For
full-mm flushes the ASCE of the dying process is still attached. The
approach of flushing first with IDTE and then simply freeing all page
tables can in theory leave stale TLB entries behind. Use the batched
free of page tables for the full-mm flushes as well.

For operations that can have a stale ASCE in the control register,
e.g. a delayed update_user_asce in switch_mm, load the kernel ASCE
to prevent invalid TLBs from being created.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
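To make the second point concrete: the safeguard boils down to loading
the kernel ASCE into the control registers that would otherwise hold a
stale user ASCE. Below is a condensed sketch mirroring the
clear_user_asce() helper added by this patch; the comments on the
register roles are an editorial gloss on the Principles of Operation,
and the 31-bit lctl variant is elided.

	/*
	 * Sketch of the safeguard (see clear_user_asce() in the diff
	 * below).  CR1 holds the primary-space ASCE and CR7 the
	 * secondary-space ASCE; while either points at a user address
	 * space the CPU may speculatively create TLB entries for it,
	 * even when running in the kernel.  Pointing both at the
	 * kernel ASCE makes such speculative entries harmless.
	 */
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	asm volatile("lctlg 1,1,%0" : : "m" (S390_lowcore.user_asce));
	asm volatile("lctlg 7,7,%0" : : "m" (S390_lowcore.user_asce));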
Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 38149b63dc44..7abf318b1522 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,7 +35,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #define LCTL_OPCODE "lctlg"
 #endif
 
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void update_user_asce(struct mm_struct *mm)
 {
 	pgd_t *pgd = mm->pgd;
 
@@ -45,6 +45,13 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	set_fs(current->thread.mm_segment);
 }
 
+static inline void clear_user_asce(struct mm_struct *mm)
+{
+	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+	asm volatile(LCTL_OPCODE" 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
@@ -53,11 +60,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev == next)
 		return;
 	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_mm until all TLB flushes are done. */
+		/* Delay update_user_asce until all TLB flushes are done. */
 		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+		/* Clear old ASCE by loading the kernel ASCE. */
+		clear_user_asce(next);
 	} else {
 		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_mm(next, tsk);
+		update_user_asce(next);
 		if (next->context.flush_mm)
 			/* Flush pending TLBs */
 			__tlb_flush_mm(next);
@@ -80,7 +89,7 @@ static inline void finish_arch_post_lock_switch(void)
 		cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_mm(mm, tsk);
+	update_user_asce(mm);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
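
Taken together, the switch_mm() logic after this patch reads roughly as
follows. This is a condensed editorial sketch, not code from the commit;
the identifiers match the diff above.

	if (atomic_inc_return(&next->context.attach_count) >> 16) {
		/* A TLB flush for next is still in flight: delay the
		 * ASCE update, but park CR1/CR7 on the kernel ASCE so
		 * that no stale user ASCE stays live in the meantime. */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
		clear_user_asce(next);
	} else {
		/* No flush in flight: attach next's ASCE right away
		 * and flush any TLBs it still has pending. */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		update_user_asce(next);
		if (next->context.flush_mm)
			__tlb_flush_mm(next);
	}

finish_arch_post_lock_switch() then completes the delayed case: it waits
until the pending flush drains (the upper half of attach_count reaches
zero), attaches the CPU to mm_cpumask, and performs the deferred
update_user_asce().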