Diffstat (limited to 'include/linux/mm_types.h')
 -rw-r--r--  include/linux/mm_types.h | 64
 1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f384bb62d8e..3cadee0a3508 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -487,14 +487,12 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
* An operation with batched TLB flushing is going on. Anything that
* can move process memory needs to flush the TLB when moving a
* PROT_NONE or PROT_NUMA mapped page.
*/
- bool tlb_flush_pending;
-#endif
+ atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
bool tlb_flush_batched;
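
[Editorial note, not part of the patch] The comment in this hunk states the contract for consumers of the field: anything that can move process memory must flush the TLB when a batched flush is pending on a PROT_NONE or PROT_NUMA mapping. As a rough, hypothetical sketch of such a consumer, using the mm_tlb_flush_pending() helper updated in the next hunk (the function name here is illustrative only):

#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical illustration: before acting on pages of @vma, check whether
 * another thread has a deferred TLB flush outstanding and, if so, flush
 * eagerly so we never act on a stale PROT_NONE/PROT_NUMA mapping.
 */
static inline void example_sync_with_batched_flusher(struct vm_area_struct *vma,
						      unsigned long start,
						      unsigned long end)
{
	if (mm_tlb_flush_pending(vma->vm_mm))
		flush_tlb_range(vma, start, end);
}
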
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end);
+
/*
* Memory barriers to keep this state in sync are graciously provided by
* the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers below are used to ensure that the tlb_flush_pending updates,
+ * which happen while the lock is not held, and the PTE updates, which happen
+ * while the lock is held, are serialized against one another.
*/
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
- return mm->tlb_flush_pending;
+ return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if more than one TLB batching thread is running in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
- mm->tlb_flush_pending = true;
+ atomic_inc(&mm->tlb_flush_pending);
/*
- * Guarantee that the tlb_flush_pending store does not leak into the
+ * Guarantee that the tlb_flush_pending increase does not leak into the
* critical section updating the page tables
*/
smp_mb__before_spinlock();
}
+
/* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
- barrier();
- mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
- return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
+ /*
+ * Guarantee that the tlb_flush_pending update does not leak into the
+ * critical section, since we must order the PTE changes and the change to
+ * the pending TLB flush indication. We could have relied on the TLB flush
+ * as a memory barrier, but this behavior is not clearly documented.
+ */
+ smp_mb__before_atomic();
+ atomic_dec(&mm->tlb_flush_pending);
}
-#endif
struct vm_fault;
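
[Editorial note, not part of the patch] For orientation, here is a hedged sketch of the producer side these helpers imply: a thread that batches PTE changes raises the counter before touching the page tables, performs the deferred flush, and then lowers the counter. Only inc_tlb_flush_pending(), dec_tlb_flush_pending(), mm_tlb_flush_pending() and mm_tlb_flush_nested() come from this patch; the wrapper function and the choice of flush primitive below are illustrative assumptions.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical illustration: bracketing a batched PTE update with the new
 * atomic counter.
 */
static void example_batched_prot_change(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Publish the pending flush before entering the PTL-protected section. */
	inc_tlb_flush_pending(mm);

	/*
	 * ... take the page table lock and rewrite the PTEs here; the barrier
	 * in inc_tlb_flush_pending() keeps the counter update from leaking
	 * past the lock acquisition ...
	 */

	/*
	 * If another batched flusher ran concurrently, its range may overlap
	 * ours in ways our own bookkeeping cannot see, so flush conservatively.
	 */
	if (mm_tlb_flush_nested(mm))
		flush_tlb_mm(mm);
	else
		flush_tlb_range(vma, start, end);

	/* Ordered after the PTE changes by smp_mb__before_atomic(). */
	dec_tlb_flush_pending(mm);
}

Keeping the indication as an atomic counter rather than a config-gated bool is presumably what allows a nested batch to be detected and handled with a conservative full-mm flush, instead of one thread clearing the other's still-pending indication.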