Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--   include/linux/ksm.h | 94
1 file changed, 79 insertions, 15 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 161e8164abcf..d73095b5cd96 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -13,21 +13,58 @@
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
 #include <linux/sched.h>
-#include <linux/sched/coredump.h>
-
-struct stable_node;
-struct mem_cgroup;
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
+
+void ksm_add_vma(struct vm_area_struct *vma);
+int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);
+int ksm_disable(struct mm_struct *mm);
+
 int __ksm_enter(struct mm_struct *mm);
 void __ksm_exit(struct mm_struct *mm);
 
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+	atomic_long_inc(&ksm_zero_pages);
+	atomic_long_inc(&mm->ksm_zero_pages);
+}
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
+	if (is_ksm_zero_pte(pte)) {
+		atomic_long_dec(&ksm_zero_pages);
+		atomic_long_dec(&mm->ksm_zero_pages);
+	}
+}
+
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->ksm_zero_pages);
+}
+
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	/* Adding mm to ksm is best effort on fork. */
 	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+		__ksm_enter(mm);
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
+{
+	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
 		return __ksm_enter(mm);
+
 	return 0;
 }
@@ -48,15 +85,32 @@ static inline void ksm_exit(struct mm_struct *mm)
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+			struct vm_area_struct *vma, unsigned long addr);
 
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
+		struct list_head *to_kill, int force_early);
+long ksm_process_profit(struct mm_struct *);
+bool ksm_process_mergeable(struct mm_struct *mm);
 
 #else  /* !CONFIG_KSM */
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_add_vma(struct vm_area_struct *vma)
+{
+}
+
+static inline int ksm_disable(struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
 {
 	return 0;
 }
@@ -65,6 +119,16 @@ static inline void ksm_exit(struct mm_struct *mm)
 {
 }
 
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
+static inline void collect_procs_ksm(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
+{
+}
+
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags)
@@ -72,18 +136,18 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+			struct vm_area_struct *vma, unsigned long addr)
 {
-	return page;
+	return folio;
 }
 
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
 		struct rmap_walk_control *rwc)
 {
 }
 
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
 {
 }
 #endif /* CONFIG_MMU */
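
The ksm_add_vma()/ksm_enable_merge_any() additions above are the in-kernel
backend of the process-wide KSM opt-in, prctl(PR_SET_MEMORY_MERGE), while
ksm_madvise() continues to serve the classic per-range madvise(MADV_MERGEABLE)
interface. A rough userspace sketch exercising both entry points (not part of
this header; assumes Linux 6.4+ with CONFIG_KSM and ksmd enabled, and the
fallback define is only needed with pre-6.4 userspace headers):

/*
 * Hedged sketch: opt a whole process into KSM via prctl(), and a
 * single range via madvise().  Build: cc -o ksm-optin ksm-optin.c
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* in linux/prctl.h since 6.4 */
#endif

int main(void)
{
	size_t len = 64 * 4096;
	char *buf;

	/* Process-wide opt-in: this reaches ksm_enable_merge_any() and
	 * sets MMF_VM_MERGE_ANY, which ksm_execve() above checks so the
	 * mm stays registered across exec. */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("prctl(PR_SET_MEMORY_MERGE)");

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* Classic per-range opt-in, serviced by ksm_madvise(). */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	memset(buf, 0x5a, len);	/* identical pages: merge candidates */
	sleep(60);		/* give ksmd time to scan and merge */
	return 0;
}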
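
The ksm_process_profit() export and the per-mm ksm_zero_pages counter
maintained by ksm_map_zero_page()/ksm_might_unmap_zero_page() feed the
per-process statistics in /proc/<pid>/ksm_stat. A minimal reader, assuming a
kernel recent enough to provide that file (the exact field set varies by
version):

/* Hedged companion sketch: dump the counters the kernel derives from
 * ksm_process_profit() and mm->ksm_zero_pages. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/ksm_stat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "ksm_process_profit 0" */
	fclose(f);
	return 0;
}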