author		Andy Lutomirski <luto@kernel.org>	2017-10-14 09:59:50 -0700
committer	Ingo Molnar <mingo@kernel.org>	2017-10-18 15:25:02 +0200
commit		4e57b94664fef55aa71cac33b4632fdfdd52b695 (patch)
tree		e1d26524e19b53b33f6dc38942b0f0e9ea645256 /arch/x86
parent		x86/mm/64: Remove the last VM_BUG_ON() from the TLB code (diff)
x86/mm: Tidy up "x86/mm: Flush more aggressively in lazy TLB mode"
Due to timezones, commit:

  b956575bed91 ("x86/mm: Flush more aggressively in lazy TLB mode")

was an outdated patch that was well tested and fixed the bug, but didn't
address Borislav's review comments. Tidy it up:

 - The name "tlb_use_lazy_mode()" was highly confusing. Change it to
   "tlb_defer_switch_to_init_mm()", which describes what it actually means.

 - Move the static_branch crap into a helper.

 - Improve comments.

Actually removing the debugfs option is in the next patch.

Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: b956575bed91 ("x86/mm: Flush more aggressively in lazy TLB mode")
Link: http://lkml.kernel.org/r/154ef95428d4592596b6e98b0af1d2747d6cfbf8.1508000261.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
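For context, the rename plus helper is the usual idiom of hiding a static key behind a small inline wrapper, so callers test a readable predicate instead of open-coding static_branch_unlikely(). A minimal sketch of that idiom, mirroring the hunks below (illustrative only, not part of the patch; example_enter_lazy_tlb() is a made-up name standing in for the real caller):

	#include <linux/jump_label.h>

	/* Header side: the key itself hides behind the __ prefix ... */
	DECLARE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);

	/* ... and all readers go through a helper whose name says what the branch means. */
	static inline bool tlb_defer_switch_to_init_mm(void)
	{
		return static_branch_unlikely(&__tlb_defer_switch_to_init_mm);
	}

	/* Exactly one .c file owns the definition. */
	DEFINE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);

	/* Callers then read as plain English (hypothetical stand-in for enter_lazy_tlb()): */
	static void example_enter_lazy_tlb(void)
	{
		if (tlb_defer_switch_to_init_mm()) {
			/* defer the CR3 switch to init_mm: stay in lazy TLB mode */
		} else {
			/* switch to init_mm (and thus CR3) immediately */
		}
	}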
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/tlbflush.h |  7
-rw-r--r--	arch/x86/mm/tlb.c               | 30
2 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d362161d3291..0d4a1bb7e303 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -87,7 +87,12 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
* to init_mm when we switch to a kernel thread (e.g. the idle thread). If
* it's false, then we immediately switch CR3 when entering a kernel thread.
*/
-DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+DECLARE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);
+
+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+ return static_branch_unlikely(&__tlb_defer_switch_to_init_mm);
+}
/*
* 6 because 6 should be plenty and struct tlb_state will fit in
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 7db23f9f804e..5ee3b59baa85 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,7 +30,7 @@
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
-DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+DEFINE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush)
@@ -213,6 +213,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/*
+ * Please ignore the name of this function. It should be called
+ * switch_to_kernel_thread().
+ *
* enter_lazy_tlb() is a hint from the scheduler that we are entering a
* kernel thread or other context without an mm. Acceptable implementations
* include doing nothing whatsoever, switching to init_mm, or various clever
@@ -227,7 +230,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+ if (tlb_defer_switch_to_init_mm()) {
/*
* There's a significant optimization that may be possible
* here. We have accurate enough TLB flush tracking that we
@@ -632,7 +635,8 @@ static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
{
char buf[2];
- buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
+ buf[0] = static_branch_likely(&__tlb_defer_switch_to_init_mm)
+ ? '1' : '0';
buf[1] = '\n';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -647,9 +651,9 @@ static ssize_t tlblazy_write_file(struct file *file,
return -EINVAL;
if (val)
- static_branch_enable(&tlb_use_lazy_mode);
+ static_branch_enable(&__tlb_defer_switch_to_init_mm);
else
- static_branch_disable(&tlb_use_lazy_mode);
+ static_branch_disable(&__tlb_defer_switch_to_init_mm);
return count;
}
@@ -660,23 +664,25 @@ static const struct file_operations fops_tlblazy = {
.llseek = default_llseek,
};
-static int __init init_tlb_use_lazy_mode(void)
+static int __init init_tlblazy(void)
{
if (boot_cpu_has(X86_FEATURE_PCID)) {
/*
- * Heuristic: with PCID on, switching to and from
- * init_mm is reasonably fast, but remote flush IPIs
- * as expensive as ever, so turn off lazy TLB mode.
+ * If we have PCID, then switching to init_mm is reasonably
+ * fast. If we don't have PCID, then switching to init_mm is
+ * quite slow, so we default to trying to defer it in the
+ * hopes that we can avoid it entirely. The latter approach
+ * runs the risk of receiving otherwise unnecessary IPIs.
*
* We can't do this in setup_pcid() because static keys
* haven't been initialized yet, and it would blow up
* badly.
*/
- static_branch_disable(&tlb_use_lazy_mode);
+ static_branch_disable(&__tlb_defer_switch_to_init_mm);
}
- debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
+ debugfs_create_file("tlb_defer_switch_to_init_mm", S_IRUSR | S_IWUSR,
arch_debugfs_dir, NULL, &fops_tlblazy);
return 0;
}
-late_initcall(init_tlb_use_lazy_mode);
+late_initcall(init_tlblazy);
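For what it's worth, the control created by init_tlblazy() is an ordinary debugfs file. Assuming debugfs is mounted at /sys/kernel/debug and that arch_debugfs_dir corresponds to the "x86" subdirectory there (an assumption of this sketch, not something stated in the patch), its current value can be read back from userspace like so:

	/* Hypothetical userspace sketch, not part of the patch. */
	#include <stdio.h>

	int main(void)
	{
		char buf[2] = { 0 };
		/* Path assumes debugfs at /sys/kernel/debug and arch_debugfs_dir == "x86". */
		FILE *f = fopen("/sys/kernel/debug/x86/tlb_defer_switch_to_init_mm", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fread(buf, 1, 1, f) == 1)
			printf("tlb_defer_switch_to_init_mm = %c\n", buf[0]);
		fclose(f);
		return 0;
	}

Writing '0' or '1' to the same file flips the static key via tlblazy_write_file(). As the commit message notes, the debugfs option itself is removed in the next patch, so this knob is short-lived.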