author    Heiko Carstens <hca@linux.ibm.com>  2020-11-16 08:06:40 +0100
committer Heiko Carstens <hca@linux.ibm.com>  2020-11-23 12:01:12 +0100
commit  87d5986345219a7e4f204726d9085ea87f3e22d0 (patch)
tree    7c3cb89736d699e56186dce5f3863c311afb6e57 /arch/s390/include/asm/mmu_context.h
parent  Merge branch 'fixes' into features (diff)
s390/mm: remove set_fs / rework address space handling
Remove set_fs support from s390. While doing this, rework address space
handling and simplify it. As a result, address spaces are now set up
like this:

  CPU running in              | %cr1 ASCE | %cr7 ASCE | %cr13 ASCE
  ----------------------------|-----------|-----------|-----------
  user space                  | user      | user      | kernel
  kernel, normal execution    | kernel    | user      | kernel
  kernel, kvm guest execution | gmap      | user      | kernel

To achieve this the getcpu vdso syscall is removed, in order to avoid
secondary address mode and a separate vdso address space for user
space. The getcpu vdso syscall will be implemented differently with a
subsequent patch.

The kernel now always accesses user space via the secondary address
space. This happens in different ways:
- with mvcos in home space mode, reading from / writing to the
  secondary address space directly
- with mvcs/mvcp in primary space mode, copying from primary space to
  secondary space or vice versa
- with e.g. cs in secondary space mode, accessing the secondary space

Switching translation modes happens with sacf before and after
instructions which access user space, like before.

Lazy handling of control register reloading is removed in the hope of
making everything simpler, but at the cost of making kernel entry and
exit a bit slower. That is: on kernel entry the primary asce is always
changed to contain the kernel asce, and on kernel exit the primary asce
is changed again so it contains the user asce.

In kernel mode there is only one exception to the primary asce: when
kvm guests are executed the primary asce contains the gmap asce (which
describes the guest address space). The primary asce is reset to the
kernel asce whenever kvm guest execution is interrupted, so that this
doesn't have to be taken into account for any user space accesses.

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
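To illustrate the sacf bracketing described above, here is a minimal,
hedged sketch (not code from this patch) of an atomic compare-and-swap
on a user space word: sacf 256 switches to secondary space mode so that
the cs instruction translates the user address through the secondary
(user) ASCE, and sacf 768 switches back to home space mode afterwards.
The function name user_cas_sketch is hypothetical; u32, __user and the
EX_TABLE fixup macro are assumed to come from the usual s390 kernel
headers.

	/*
	 * Sketch only: cas on a naturally aligned user space word while
	 * running in secondary space mode. On a translation fault at the
	 * cs, the exception table entry skips to label 1, leaving
	 * rc == -EFAULT.
	 */
	static inline int user_cas_sketch(u32 __user *uaddr, u32 old, u32 new)
	{
		int rc = -EFAULT;

		asm volatile(
			"	sacf	256\n"		/* secondary space mode */
			"0:	cs	%1,%2,0(%3)\n"	/* cas on the user word */
			"	lhi	%0,0\n"		/* access worked, rc = 0 */
			"1:	sacf	768\n"		/* back to home space mode */
			EX_TABLE(0b, 1b)
			: "+d" (rc), "+d" (old)
			: "d" (new), "a" (uaddr)
			: "cc", "memory");
		return rc;
	}

On success rc is 0 and old holds the compared value; on a fault the
fixup lands on the sacf 768, so the CPU never stays in secondary space
mode.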
Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 25
1 file changed, 4 insertions(+), 21 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index cec19ae164eb..51def960a3dd 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -71,16 +71,6 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm) do { } while (0)
 
-static inline void set_user_asce(struct mm_struct *mm)
-{
-	S390_lowcore.user_asce = mm->context.asce;
-	__ctl_load(S390_lowcore.user_asce, 1, 1);
-	clear_cpu_flag(CIF_ASCE_PRIMARY);
-}
-
-mm_segment_t enable_sacf_uaccess(void);
-void disable_sacf_uaccess(mm_segment_t old_fs);
-
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
@@ -88,15 +78,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	/* Clear previous user-ASCE from CR1 and CR7 */
-	if (!test_cpu_flag(CIF_ASCE_PRIMARY)) {
-		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-		set_cpu_flag(CIF_ASCE_PRIMARY);
-	}
-	if (test_cpu_flag(CIF_ASCE_SECONDARY)) {
-		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
-		clear_cpu_flag(CIF_ASCE_SECONDARY);
-	}
+	/* Clear previous user-ASCE from CR7 */
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	if (prev != next)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
@@ -115,7 +98,7 @@ static inline void finish_arch_post_lock_switch(void)
 		__tlb_flush_mm_lazy(mm);
 		preempt_enable();
 	}
-	set_fs(current->thread.mm_segment);
+	__ctl_load(S390_lowcore.user_asce, 7, 7);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
@@ -126,7 +109,7 @@ static inline void activate_mm(struct mm_struct *prev,
 {
 	switch_mm(prev, next, current);
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-	set_user_asce(next);
+	__ctl_load(S390_lowcore.user_asce, 7, 7);
 }
 
 #endif /* __S390_MMU_CONTEXT_H */
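For reference, the __ctl_load() calls in this patch boil down to a
single lctlg (load control) instruction. The following is a simplified
sketch that closely follows the shape of the s390 implementation in
arch/s390/include/asm/ctl_reg.h, with the compile-time size checks
omitted; __ctl_load_sketch is a made-up name:

	/*
	 * Sketch: load control registers %c(low) through %c(high) from
	 * memory. __ctl_load(asce, 7, 7) thus loads exactly %cr7, which
	 * is why a single call suffices to install or clear the user
	 * ASCE in the hunks above.
	 */
	#define __ctl_load_sketch(array, low, high) do {		\
		typedef struct { char _[sizeof(array)]; } addrtype;	\
									\
		asm volatile(						\
			"	lctlg	%1,%2,%0\n"			\
			:						\
			: "Q" (*(addrtype *)(&(array))),		\
			  "i" (low), "i" (high)				\
			: "memory");					\
	} while (0)

With that, the net effect of the diff is easy to state: switch_mm()
parks the kernel ASCE in %cr7 so no stale user ASCE is visible while a
context switch is in progress, and finish_arch_post_lock_switch() /
activate_mm() install the new user ASCE into %cr7 once the switch is
complete.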