Diffstat (limited to 'include/linux/hmm.h'):
 include/linux/hmm.h | 179 ++++----------------------------------------
 1 file changed, 20 insertions(+), 159 deletions(-)
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b8a08b2a10ca..3fec513b9c00 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -84,15 +84,12 @@
* @notifiers: count of active mmu notifiers
*/
struct hmm {
- struct mm_struct *mm;
- struct kref kref;
+ struct mmu_notifier mmu_notifier;
spinlock_t ranges_lock;
struct list_head ranges;
struct list_head mirrors;
- struct mmu_notifier mmu_notifier;
struct rw_semaphore mirrors_sem;
wait_queue_head_t wq;
- struct rcu_head rcu;
long notifiers;
};
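
[Editorial sketch] With the mmu_notifier embedded at the top of struct hmm, and the kref, mm pointer, and rcu head gone, the notifier core owns the structure's lifetime and callbacks can recover the hmm directly from the notifier pointer. A minimal sketch of that container_of() pattern; mm_to_hmm() is a hypothetical helper name, not part of this patch:

	/* Sketch only: recover the owning hmm from a notifier callback. */
	static inline struct hmm *mm_to_hmm(struct mmu_notifier *mn)
	{
		return container_of(mn, struct hmm, mmu_notifier);
	}
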
@@ -158,13 +155,11 @@ enum hmm_pfn_value_e {
* @values: pfn value for some special case (none, special, error, ...)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking of pfn flags so that only default_flags matter
- * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: the pfns array has not changed since it was filled by an HMM function
*/
struct hmm_range {
struct hmm *hmm;
- struct vm_area_struct *vma;
struct list_head list;
unsigned long start;
unsigned long end;
@@ -173,32 +168,11 @@ struct hmm_range {
const uint64_t *values;
uint64_t default_flags;
uint64_t pfn_flags_mask;
- uint8_t page_shift;
uint8_t pfn_shift;
bool valid;
};
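
[Editorial sketch] With page_shift and the vma pointer removed, a range always operates at the CPU page size, so the pfns array is sized purely from start and end. A hedged sketch of driver-side allocation under that assumption (the pfns member itself is not visible in this hunk):

	/* Illustrative only: one pfns entry per CPU page in the range. */
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;

	range->pfns = kcalloc(npages, sizeof(*range->pfns), GFP_KERNEL);
	if (!range->pfns)
		return -ENOMEM;
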
/*
- * hmm_range_page_shift() - return the page shift for the range
- * @range: range being queried
- * Return: page shift (page size = 1 << page shift) for the range
- */
-static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
-{
- return range->page_shift;
-}
-
-/*
- * hmm_range_page_size() - return the page size for the range
- * @range: range being queried
- * Return: page size for the range in bytes
- */
-static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
-{
- return 1UL << hmm_range_page_shift(range);
-}
-
-/*
* hmm_range_wait_until_valid() - wait for range to be valid
* @range: range affected by invalidation to wait on
* @timeout: time out for wait in ms (ie abort wait after that period of time)
@@ -291,40 +265,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
}
/*
- * Old API:
- * hmm_pfn_to_page()
- * hmm_pfn_to_pfn()
- * hmm_pfn_from_page()
- * hmm_pfn_from_pfn()
- *
- * These are the OLD API; please use the new API. They are kept here to
- * avoid cross-tree merge pain, i.e. we convert things to the new API in
- * stages.
- */
-static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
- uint64_t pfn)
-{
- return hmm_device_entry_to_page(range, pfn);
-}
-
-static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
- uint64_t pfn)
-{
- return hmm_device_entry_to_pfn(range, pfn);
-}
-
-static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
- struct page *page)
-{
- return hmm_device_entry_from_page(range, page);
-}
-
-static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
- unsigned long pfn)
-{
- return hmm_device_entry_from_pfn(range, pfn);
-}
-
-/*
* Mirroring: how to synchronize device page table with CPU page table.
*
* A device driver that is participating in HMM mirroring must always
@@ -375,29 +315,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
- * enum hmm_update_event - type of update
- * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
- */
-enum hmm_update_event {
- HMM_UPDATE_INVALIDATE,
-};
-
-/*
- * struct hmm_update - HMM update information for callback
- *
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
- * @event: event triggering the update (what is happening)
- * @blockable: can the callback block/sleep ?
- */
-struct hmm_update {
- unsigned long start;
- unsigned long end;
- enum hmm_update_event event;
- bool blockable;
-};
-
-/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
* @update: callback to update range on a device
@@ -417,9 +334,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
- * @update: update information (see struct hmm_update)
- * Return: -EAGAIN if update.blockable false and callback need to
- * block, 0 otherwise.
+ * @update: update information (see struct mmu_notifier_range)
+ * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
+ * and callback needs to block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@@ -430,8 +347,9 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
- int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
- const struct hmm_update *update);
+ int (*sync_cpu_device_pagetables)(
+ struct hmm_mirror *mirror,
+ const struct mmu_notifier_range *update);
};
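
[Editorial sketch] Since the callback now receives the struct mmu_notifier_range directly, a driver tests whether it may sleep with mmu_notifier_range_blockable() rather than a blockable field. A hedged sketch of a converted callback; my_device_unmap() is a hypothetical driver function:

	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
			const struct mmu_notifier_range *update)
	{
		/* Caller cannot sleep: ask the notifier core to retry later. */
		if (!mmu_notifier_range_blockable(update))
			return -EAGAIN;

		/* Tear down device mappings covering the invalidated range. */
		my_device_unmap(mirror, update->start, update->end);
		return 0;
	}
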
/*
@@ -457,20 +375,24 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
-int hmm_range_register(struct hmm_range *range,
- struct hmm_mirror *mirror,
- unsigned long start,
- unsigned long end,
- unsigned page_shift);
+int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
void hmm_range_unregister(struct hmm_range *range);
-long hmm_range_snapshot(struct hmm_range *range);
-long hmm_range_fault(struct hmm_range *range, bool block);
+
+/*
+ * Allow the fault handler to drop mmap_sem and return -EAGAIN instead of
+ * blocking; the caller must then retry the fault.
+ */
+#define HMM_FAULT_ALLOW_RETRY (1 << 0)
+
+/* Don't fault in missing PTEs, just snapshot the current state. */
+#define HMM_FAULT_SNAPSHOT (1 << 1)
+
+long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+
long hmm_range_dma_map(struct hmm_range *range,
struct device *device,
dma_addr_t *daddrs,
- bool block);
+ unsigned int flags);
long hmm_range_dma_unmap(struct hmm_range *range,
- struct vm_area_struct *vma,
struct device *device,
dma_addr_t *daddrs,
bool dirty);
@@ -484,67 +406,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
- bool ret = hmm_range_valid(range);
-
- hmm_range_unregister(range);
- return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
- struct hmm_range *range, bool block)
-{
- long ret;
-
- /*
- * With the old API the driver must set each individual entries with
- * the requested flags (valid, write, ...). So here we set the mask to
- * keep intact the entries provided by the driver and zero out the
- * default_flags.
- */
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
-
- ret = hmm_range_register(range, mirror,
- range->start, range->end,
- PAGE_SHIFT);
- if (ret)
- return (int)ret;
-
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- /*
- * The mmap_sem was taken by the driver; we release it here and
- * return -EAGAIN, which corresponds to the mmap_sem having been
- * dropped in the old API.
- */
- up_read(&range->vma->vm_mm->mmap_sem);
- return -EAGAIN;
- }
-
- ret = hmm_range_fault(range, block);
- if (ret <= 0) {
- if (ret == -EBUSY || !ret) {
- /* Same as above, drop mmap_sem to match old API. */
- up_read(&range->vma->vm_mm->mmap_sem);
- ret = -EBUSY;
- } else if (ret == -EAGAIN)
- ret = -EBUSY;
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
-}
-
-/* Below are for HMM internal use only! Not to be used by device driver! */
-static inline void hmm_mm_init(struct mm_struct *mm)
-{
- mm->hmm = NULL;
-}
-#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#endif /* LINUX_HMM_H */