Diffstat (limited to 'include/linux/mmu_notifier.h')
-rw-r--r--  include/linux/mmu_notifier.h  63
1 file changed, 59 insertions(+), 4 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 4050ec1c3b45..b6c004bd9f6a 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -10,6 +10,36 @@
struct mmu_notifier;
struct mmu_notifier_ops;
+/**
+ * enum mmu_notifier_event - reason for the mmu notifier callback
+ * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
+ * that moves the range
+ *
+ * @MMU_NOTIFY_CLEAR: clear page table entry (there are many reasons for this,
+ * such as madvise() or replacing a page with another one, ...).
+ *
+ * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
+ * range, i.e. using the vma access permission (vm_page_prot) to update the
+ * whole range is enough; there is no need to inspect changes to the CPU page
+ * table (mprotect() syscall)
+ *
+ * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
+ * flag for pages in the range, so to mirror those changes the user must
+ * inspect the CPU page table (from the end callback).
+ *
+ * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
+ * same access flags). The user should soft-dirty the page in the end callback
+ * to make sure that anyone relying on soft dirtiness catches pages that might
+ * be written through non-CPU mappings.
+ */
+enum mmu_notifier_event {
+ MMU_NOTIFY_UNMAP = 0,
+ MMU_NOTIFY_CLEAR,
+ MMU_NOTIFY_PROTECTION_VMA,
+ MMU_NOTIFY_PROTECTION_PAGE,
+ MMU_NOTIFY_SOFT_DIRTY,
+};
+
#ifdef CONFIG_MMU_NOTIFIER
/*
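With the event carried in the range, a listener can tell why it is being
invalidated. Below is a minimal sketch of how a mirroring driver might use it,
assuming the range-based invalidate_range_start() callback this header already
provides; struct my_mirror and its helpers are hypothetical names, not part of
this patch:

	/* Hypothetical driver state wrapping the notifier. */
	struct my_mirror {
		struct mmu_notifier notifier;
		/* ... device page table, locks, ... */
	};

	/* Only tear device mappings down when pages really go away, and
	 * merely downgrade access on protection changes. */
	static int my_mirror_range_start(struct mmu_notifier *mn,
					 const struct mmu_notifier_range *range)
	{
		struct my_mirror *mirror =
			container_of(mn, struct my_mirror, notifier);

		switch (range->event) {
		case MMU_NOTIFY_UNMAP:
		case MMU_NOTIFY_CLEAR:
			/* Pages are going away: drop the device mappings. */
			my_mirror_unmap(mirror, range->start, range->end);
			break;
		case MMU_NOTIFY_PROTECTION_VMA:
		case MMU_NOTIFY_PROTECTION_PAGE:
			/* Only access rights change: write protect instead of
			 * unmapping. */
			my_mirror_write_protect(mirror, range->start,
						range->end);
			break;
		case MMU_NOTIFY_SOFT_DIRTY:
			/* Same page, same access: nothing to invalidate. */
			break;
		}
		return 0;
	}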
@@ -25,11 +55,15 @@ struct mmu_notifier_mm {
spinlock_t lock;
};
+#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
+
struct mmu_notifier_range {
+ struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
- bool blockable;
+ unsigned flags;
+ enum mmu_notifier_event event;
};
struct mmu_notifier_ops {
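Because the range now carries the originating vma, a listener can inspect VMA
attributes directly rather than taking mmap_sem and calling find_vma(). A small
hedged sketch; the helper name is made up for illustration:

	static bool
	my_range_is_private_anon(const struct mmu_notifier_range *range)
	{
		/* Be defensive: not every caller may have a vma to pass. */
		return range->vma && vma_is_anonymous(range->vma) &&
		       !(range->vma->vm_flags & VM_SHARED);
	}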
@@ -225,6 +259,14 @@ extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern bool
+mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
+
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
+}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
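The helper is the intended replacement for reading the old blockable field. A
sketch of how a listener that needs to sleep would use it; my_driver_lock is a
placeholder for whatever sleeping lock the driver holds:

	static int
	my_sleepable_range_start(struct mmu_notifier *mn,
				 const struct mmu_notifier_range *range)
	{
		/* If the caller cannot block (OOM-reaper style), refuse with
		 * -EAGAIN instead of taking a sleeping lock. */
		if (!mmu_notifier_range_blockable(range))
			return -EAGAIN;

		mutex_lock(&my_driver_lock);
		/* ... invalidate mappings in [range->start, range->end) ... */
		mutex_unlock(&my_driver_lock);
		return 0;
	}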
@@ -269,7 +311,7 @@ static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
if (mm_has_notifiers(range->mm)) {
- range->blockable = true;
+ range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
__mmu_notifier_invalidate_range_start(range);
}
}
@@ -278,7 +320,7 @@ static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
if (mm_has_notifiers(range->mm)) {
- range->blockable = false;
+ range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
return __mmu_notifier_invalidate_range_start(range);
}
return 0;
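On the caller side, the _nonblock variant clears MMU_NOTIFIER_RANGE_BLOCKABLE
and propagates a listener's refusal. A hedged sketch of the calling pattern,
inside a function that reports success or failure (the OOM reaper is the
in-tree user of the non-blocking start; the event chosen here is only
illustrative):

	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
				vma->vm_start, vma->vm_end);
	if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
		/* A notifier could not proceed without sleeping; back off. */
		return false;
	}
	/* ... unmap the page range ... */
	mmu_notifier_invalidate_range_end(&range);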
@@ -318,13 +360,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
+ enum mmu_notifier_event event,
+ unsigned flags,
+ struct vm_area_struct *vma,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
+ range->vma = vma;
+ range->event = event;
range->mm = mm;
range->start = start;
range->end = end;
+ range->flags = flags;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
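Every mmu_notifier_range_init() call site now passes the event, flags, and vma
up front. A hedged example of what a typical zap-style caller might look like
after this change; vma, start, and end are assumed to exist in the surrounding
function:

	struct mmu_notifier_range range;

	/* CPU page table entries in [start, end) are about to be cleared;
	 * flags 0 is fine because the start helpers manage the BLOCKABLE bit
	 * themselves. */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... clear the page table entries ... */
	mmu_notifier_invalidate_range_end(&range);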
@@ -452,9 +500,14 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
range->end = end;
}
-#define mmu_notifier_range_init(range, mm, start, end) \
+#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return true;
+}
static inline int mm_has_notifiers(struct mm_struct *mm)
{
@@ -517,6 +570,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}
+#define mmu_notifier_range_update_to_read_only(r) false
+
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
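Since the !CONFIG_MMU_NOTIFIER stubs make mmu_notifier_range_blockable() return
true and define mmu_notifier_range_update_to_read_only() to false, callers can
use both without #ifdef guards. A hedged sketch of a listener using the
read-only query, reusing the hypothetical mirror helpers from the earlier
sketch (the helper's exact semantics live in mm/mmu_notifier.c and are not part
of this diff):

	/* If the invalidation merely downgrades the range to read-only, a
	 * mirror can keep its mappings and just write protect them. */
	if (mmu_notifier_range_update_to_read_only(range))
		my_mirror_write_protect(mirror, range->start, range->end);
	else
		my_mirror_unmap(mirror, range->start, range->end);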