Diffstat (limited to 'arch/arm64/include/asm/mmu.h')

 arch/arm64/include/asm/mmu.h | 79 ++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 47 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d79ce6df9e12..48f8466a4be9 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -12,69 +12,54 @@
#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK (UL(0xffff) << 48)
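These two defines describe where the ASID lives: bits 63:48 of TTBRn_EL1 hold a 16-bit ASID, and KPTI allocates ASIDs in pairs, with USER_ASID_FLAG (bit 48, the bottom bit of the ASID field) marking the user half of each pair. As a hedged illustration only, not part of this patch, a sketch of how a context-switch path might install a pgd and ASID (phys_to_ttbr(), write_sysreg() and isb() are existing helpers; the function itself is assumed):

/* Sketch only (not from this patch): install a pgd plus ASID into
 * TTBR0_EL1. */
static inline void sketch_install_ttbr0(struct mm_struct *mm, u64 asid)
{
	u64 ttbr = phys_to_ttbr(virt_to_phys(mm->pgd));

	ttbr &= ~TTBR_ASID_MASK;	/* drop any stale ASID bits */
	ttbr |= asid << 48;		/* ASID lives in bits 63:48 */
	write_sysreg(ttbr, ttbr0_el1);
	isb();				/* synchronize the new context */
}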
-#define BP_HARDEN_EL2_SLOTS 4
-
#ifndef __ASSEMBLY__
+#include <linux/refcount.h>
+#include <asm/cpufeature.h>
+
typedef struct {
atomic64_t id;
+#ifdef CONFIG_COMPAT
+ void *sigpage;
+#endif
+ refcount_t pinned;
void *vdso;
unsigned long flags;
} mm_context_t;
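The two new fields carry per-mm state: sigpage holds the compat signal-return trampoline page, and pinned counts users, such as IOMMU SVA drivers, that need the mm's ASID to survive a rollover. A minimal sketch of the pinning contract follows, with arm64_mm_context_get()/arm64_mm_context_put() assumed from the ASID-pinning series rather than defined in this hunk:

/* Sketch: keep the ASID stable while a device shares this mm.
 * (arm64_mm_context_get/put are assumed, not part of this patch.) */
unsigned long asid = arm64_mm_context_get(mm);	/* bumps context.pinned */
if (!asid)
	return -ENOSPC;		/* no pinned-ASID space left */

/* ... hand 'asid' to the device while DMA to user memory is live ... */

arm64_mm_context_put(mm);	/* drops context.pinned; ASID reusable */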
/*
- * This macro is only used by the TLBI code, which cannot race with an
- * ASID change and therefore doesn't need to reload the counter using
- * atomic64_read.
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
+ * be reallocated when scheduling one of its threads following a
+ * rollover event (see new_context() and flush_context()). In this case,
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
+ * may use a stale ASID. This is fine in principle as the new ASID is
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
+ * care to handle the following race:
+ *
+ *    CPU 0                      CPU 1                      CPU 2
+ *
+ *    // ptep_clear_flush(mm)
+ *    xchg_relaxed(pte, 0)
+ *    DSB ISHST
+ *    old = ASID(mm)
+ *        |                      <rollover>
+ *        |                      new = new_context(mm)
+ *        \-----------------> atomic_set(mm->context.id, new)
+ *                              cpu_switch_mm(mm)
+ *                              // Hardware walk of pte using new ASID
+ *    TLBI(old)
+ *
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
+ * written by CPU 0.
*/
-#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+#define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff)
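For illustration, here is a sketch of the TLB-invalidation pattern the comment above is protecting, modelled loosely on flush_tlb_page_nosync() in <asm/tlbflush.h>; __TLBI_VADDR(), __tlbi() and dsb() exist there, but the exact body is assumed:

/* Sketch: invalidate one user page by VA plus ASID. The DSB publishes
 * the PTE update before the TLBI, matching the race diagram above. */
static inline void sketch_flush_tlb_page(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);			/* the DSB ISHST from the diagram */
	__tlbi(vale1is, addr);		/* TLBI(old) may use a stale ASID */
}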
static inline bool arm64_kernel_unmapped_at_el0(void)
{
return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
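This predicate is how the TLBI code decides whether KPTI's paired user ASIDs are in play; <asm/tlbflush.h> wraps it in a helper along the following lines (reproduced from the existing header, so treat the exact form as a sketch):

/* When the kernel is unmapped at EL0, every user ASID has a twin with
 * USER_ASID_FLAG set that must be invalidated as well. */
#define __tlbi_user(op, arg) do {				\
	if (arm64_kernel_unmapped_at_el0())			\
		__tlbi(op, (arg) | USER_ASID_FLAG);		\
} while (0)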
-typedef void (*bp_hardening_cb_t)(void);
-
-struct bp_hardening_data {
- int hyp_vectors_slot;
- bp_hardening_cb_t fn;
-};
-
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
- defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
- return this_cpu_ptr(&bp_hardening_data);
-}
-
-static inline void arm64_apply_bp_hardening(void)
-{
- struct bp_hardening_data *d;
-
- if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
- return;
-
- d = arm64_get_bp_hardening_data();
- if (d->fn)
- d->fn();
-}
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
- return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void) { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);