author		2023-11-06 15:42:08 -0800
committer	2023-11-06 15:42:08 -0800
commit		cdd5b5a9761fd66d17586e4f4ba6588c70e640ea (patch)
tree		aba8409818be01f6af8683bf76594c790942d0bc /include/linux/mm_types.h
parent		Input: powermate - fix use-after-free in powermate_config_complete (diff)
parent		Input: walkera0701 - use module_parport_driver macro to simplify the code (diff)
Merge branch 'next' into for-linus
Prepare input updates for 6.7 merge window.
Diffstat (limited to 'include/linux/mm_types.h')
-rw-r--r--	include/linux/mm_types.h | 60
1 file changed, 52 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 306a3d1a0fa6..7d30dc4ff0ff 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,6 +514,20 @@ struct vm_area_struct {
 	};
 
 #ifdef CONFIG_PER_VMA_LOCK
+	/*
+	 * Can only be written (using WRITE_ONCE()) while holding both:
+	 *  - mmap_lock (in write mode)
+	 *  - vm_lock->lock (in write mode)
+	 * Can be read reliably while holding one of:
+	 *  - mmap_lock (in read or write mode)
+	 *  - vm_lock->lock (in read or write mode)
+	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+	 * while holding nothing (except RCU to keep the VMA struct allocated).
+	 *
+	 * This sequence counter is explicitly allowed to overflow; sequence
+	 * counter reuse can only lead to occasional unnecessary use of the
+	 * slowpath.
+	 */
 	int vm_lock_seq;
 	struct vma_lock *vm_lock;
 
@@ -583,6 +597,21 @@ struct mm_cid {
 struct kioctx_table;
 struct mm_struct {
 	struct {
+		/*
+		 * Fields which are often written to are placed in a separate
+		 * cache line.
+		 */
+		struct {
+			/**
+			 * @mm_count: The number of references to &struct
+			 * mm_struct (@mm_users count as 1).
+			 *
+			 * Use mmgrab()/mmdrop() to modify. When this drops to
+			 * 0, the &struct mm_struct is freed.
+			 */
+			atomic_t mm_count;
+		} ____cacheline_aligned_in_smp;
+
 		struct maple_tree mm_mt;
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
@@ -620,14 +649,6 @@ struct mm_struct {
 		 */
 		atomic_t mm_users;
 
-		/**
-		 * @mm_count: The number of references to &struct mm_struct
-		 * (@mm_users count as 1).
-		 *
-		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
-		 * &struct mm_struct is freed.
-		 */
-		atomic_t mm_count;
 #ifdef CONFIG_SCHED_MM_CID
 		/**
 		 * @pcpu_cid: Per-cpu current cid.
@@ -672,6 +693,20 @@ struct mm_struct {
 					  * by mmlist_lock
 					  */
 #ifdef CONFIG_PER_VMA_LOCK
+		/*
+		 * This field has lock-like semantics, meaning it is sometimes
+		 * accessed with ACQUIRE/RELEASE semantics.
+		 * Roughly speaking, incrementing the sequence number is
+		 * equivalent to releasing locks on VMAs; reading the sequence
+		 * number can be part of taking a read lock on a VMA.
+		 *
+		 * Can be modified under write mmap_lock using RELEASE
+		 * semantics.
+		 * Can be read with no other protection when holding write
+		 * mmap_lock.
+		 * Can be read with ACQUIRE semantics if not holding write
+		 * mmap_lock.
+		 */
 		int mm_lock_seq;
 #endif
 
@@ -1251,6 +1286,15 @@ enum {
 	FOLL_PCI_P2PDMA = 1 << 10,
 	/* allow interrupts from generic signals */
 	FOLL_INTERRUPTIBLE = 1 << 11,
+	/*
+	 * Always honor (trigger) NUMA hinting faults.
+	 *
+	 * FOLL_WRITE implicitly honors NUMA hinting faults because a
+	 * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
+	 * apply). get_user_pages_fast_only() always implicitly honors NUMA
+	 * hinting faults.
+	 */
+	FOLL_HONOR_NUMA_FAULT = 1 << 12,
 
 	/* See also internal only FOLL flags in mm/internal.h */
 };
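The new vm_lock_seq/mm_lock_seq comments describe how the two counters cooperate on the per-VMA read-lock fast path: a VMA is write-locked exactly when its sequence number matches the mm-wide one, and a mismatch while vm_lock->lock is held for read means the VMA can be used without mmap_lock. Below is a condensed, illustrative paraphrase of the upstream vma_start_read() helper from include/linux/mm.h; example_vma_start_read is a renamed sketch, not the verbatim kernel source.

#include <linux/mm.h>
#include <linux/mm_types.h>

/* Caller holds rcu_read_lock() so the VMA struct stays allocated. */
static bool example_vma_start_read(struct vm_area_struct *vma)
{
	/*
	 * Unreliable pre-check (READ_ONCE() under RCU only): a stale value
	 * can cause a pessimistic bailout to the slowpath, never a wrong
	 * "locked" answer.
	 */
	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
		return false;

	if (!down_read_trylock(&vma->vm_lock->lock))
		return false;

	/*
	 * Recheck now that vm_lock->lock is held, pairing ACQUIRE semantics
	 * with the RELEASE store done when mm_lock_seq is bumped. Matching
	 * counters mean the VMA is write-locked, so back out; counter
	 * overflow can only cost an unnecessary trip through the slowpath.
	 */
	if (vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq)) {
		up_read(&vma->vm_lock->lock);
		return false;
	}
	return true;	/* read-locked; undo with up_read(&vma->vm_lock->lock) */
}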
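Moving mm_count into its own ____cacheline_aligned_in_smp struct isolates the one frequently written field (it is bumped on every mmgrab()/mmdrop(), e.g. when the scheduler pins an mm for a kernel thread) from read-mostly neighbors such as mm_mt, avoiding false sharing. A minimal usage sketch follows; mmgrab()/mmdrop() are the real APIs from linux/sched/mm.h, while example_inspect_mm is a hypothetical caller.

#include <linux/sched/mm.h>

static void example_inspect_mm(struct mm_struct *mm)
{
	mmgrab(mm);	/* atomic_inc(&mm->mm_count): keeps the struct allocated */

	/*
	 * Safe to dereference mm here even if every user (mm_users) exits;
	 * the address space may be torn down, but the mm_struct persists.
	 */

	mmdrop(mm);	/* frees the mm_struct once mm_count reaches zero */
}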
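FOLL_HONOR_NUMA_FAULT lets GUP callers opt in to triggering NUMA hinting faults even for read-only access, which secondary MMUs such as KVM need so NUMA balancing keeps seeing accesses made through them. A hedged sketch of such a caller, assuming the real get_user_pages_unlocked() entry point; example_get_readable is hypothetical.

#include <linux/mm.h>

static struct page *example_get_readable(unsigned long addr)
{
	struct page *page;

	/* Read access, but still honor (trigger) NUMA hinting faults. */
	if (get_user_pages_unlocked(addr, 1, &page,
				    FOLL_HONOR_NUMA_FAULT) != 1)
		return NULL;

	return page;	/* drop the reference with put_page(page) */
}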