author    Jason Gunthorpe <jgg@mellanox.com>    2019-07-02 15:07:52 -0300
committer Jason Gunthorpe <jgg@mellanox.com>    2019-07-02 15:10:45 -0300
commit    cc5dfd59e375f4d0f2b64643723d16b38b2f2d78 (patch)
tree      0a8f526169ee889d6af4e7679122c946773ec33a /include
parent    Merge tag 'v5.2-rc7' into rdma.git hmm (diff)
parent    mm: don't select MIGRATE_VMA_HELPER from HMM_MIRROR (diff)
Merge branch 'hmm-devmem-cleanup.4' into rdma.git hmm
Christoph Hellwig says:

====================
Below is a series that cleans up the dev_pagemap interface so that it is
more easily usable, which removes the need to wrap it in hmm and thus
allowing to kill a lot of code

Changes since v3:
 - pull in "mm/swap: Fix release_pages() when releasing devmap pages" and
   rebase the other patches on top of that
 - fold the hmm_devmem_add_resource into the DEVICE_PUBLIC memory removal
   patch
 - remove _vm_normal_page as it isn't needed without DEVICE_PUBLIC memory
 - pick up various ACKs

Changes since v2:
 - fix nvdimm kunit build
 - add a new memory type for device dax
 - fix a few issues in intermediate patches that didn't show up in the end
   result
 - incorporate feedback from Michal Hocko, including killing of the
   DEVICE_PUBLIC memory type entirely

Changes since v1:
 - rebase
 - also switch p2pdma to the internal refcount
 - add type checking for pgmap->type
 - rename the migrate method to migrate_to_ram
 - cleanup the altmap_valid flag
 - various tidbits from the reviews
====================

Conflicts resolved by:
 - Keeping Ira's version of the code in swap.c
 - Using the delete for the section in hmm.rst
 - Using the delete for the devmap code in hmm.c and .h

* branch 'hmm-devmem-cleanup.4': (24 commits)
  mm: don't select MIGRATE_VMA_HELPER from HMM_MIRROR
  mm: remove the HMM config option
  mm: sort out the DEVICE_PRIVATE Kconfig mess
  mm: simplify ZONE_DEVICE page private data
  mm: remove hmm_devmem_add
  mm: remove hmm_vma_alloc_locked_page
  nouveau: use devm_memremap_pages directly
  nouveau: use alloc_page_vma directly
  PCI/P2PDMA: use the dev_pagemap internal refcount
  device-dax: use the dev_pagemap internal refcount
  memremap: provide an optional internal refcount in struct dev_pagemap
  memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
  memremap: remove the data field in struct dev_pagemap
  memremap: add a migrate_to_ram method to struct dev_pagemap_ops
  memremap: lift the devmap_enable manipulation into devm_memremap_pages
  memremap: pass a struct dev_pagemap to ->kill and ->cleanup
  memremap: move dev_pagemap callbacks into a separate structure
  memremap: validate the pagemap type passed to devm_memremap_pages
  mm: factor out a devm_request_free_mem_region helper
  mm: export alloc_pages_vma
  ...

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
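For orientation, here is a minimal driver-side sketch of the interface this series arrives at. This is hypothetical illustration code, not part of the commit: the struct dev_pagemap fields, the dev_pagemap_ops callbacks and devm_memremap_pages() are the ones visible in the diff below, while every "mydev" identifier is invented.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/mm.h>

static void mydev_page_free(struct page *page)
{
	/* release driver bookkeeping attached to the device page */
}

static vm_fault_t mydev_migrate_to_ram(struct vm_fault *vmf)
{
	/* must migrate the faulting device-private page back to RAM */
	return VM_FAULT_SIGBUS;		/* placeholder body */
}

static const struct dev_pagemap_ops mydev_pagemap_ops = {
	.page_free	= mydev_page_free,
	.migrate_to_ram	= mydev_migrate_to_ram,
};

static int mydev_add_memory(struct device *dev, struct resource *res)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;
	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->res = *res;
	pgmap->ops = &mydev_pagemap_ops;
	/*
	 * pgmap->ref left NULL: the new internal refcount is used, so
	 * no ->kill()/->cleanup() methods are needed either.
	 */
	return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, pgmap));
}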
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hmm.h       198
-rw-r--r--  include/linux/ioport.h      3
-rw-r--r--  include/linux/memremap.h   75
-rw-r--r--  include/linux/mm.h         28
-rw-r--r--  include/linux/mm_types.h    4
-rw-r--r--  include/linux/swapops.h    15
6 files changed, 56 insertions, 267 deletions
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 0fa8ea34ccef..b8a08b2a10ca 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -62,7 +62,7 @@
#include <linux/kconfig.h>
#include <asm/pgtable.h>
-#if IS_ENABLED(CONFIG_HMM)
+#ifdef CONFIG_HMM_MIRROR
#include <linux/device.h>
#include <linux/migrate.h>
@@ -324,9 +324,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
return hmm_device_entry_from_pfn(range, pfn);
}
-
-
-#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
* Mirroring: how to synchronize device page table with CPU page table.
*
@@ -550,197 +547,4 @@ static inline void hmm_mm_init(struct mm_struct *mm)
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
-struct hmm_devmem;
-
-struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
- unsigned long addr);
-
-/*
- * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events
- *
- * @free: call when refcount on page reach 1 and thus is no longer use
- * @fault: call when there is a page fault to unaddressable memory
- *
- * Both callback happens from page_free() and page_fault() callback of struct
- * dev_pagemap respectively. See include/linux/memremap.h for more details on
- * those.
- *
- * The hmm_devmem_ops callback are just here to provide a coherent and
- * uniq API to device driver and device driver should not register their
- * own page_free() or page_fault() but rely on the hmm_devmem_ops call-
- * back.
- */
-struct hmm_devmem_ops {
- /*
- * free() - free a device page
- * @devmem: device memory structure (see struct hmm_devmem)
- * @page: pointer to struct page being freed
- *
- * Call back occurs whenever a device page refcount reach 1 which
- * means that no one is holding any reference on the page anymore
- * (ZONE_DEVICE page have an elevated refcount of 1 as default so
- * that they are not release to the general page allocator).
- *
- * Note that callback has exclusive ownership of the page (as no
- * one is holding any reference).
- */
- void (*free)(struct hmm_devmem *devmem, struct page *page);
- /*
- * fault() - CPU page fault or get user page (GUP)
- * @devmem: device memory structure (see struct hmm_devmem)
- * @vma: virtual memory area containing the virtual address
- * @addr: virtual address that faulted or for which there is a GUP
- * @page: pointer to struct page backing virtual address (unreliable)
- * @flags: FAULT_FLAG_* (see include/linux/mm.h)
- * @pmdp: page middle directory
- * Return: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
- * on error
- *
- * The callback occurs whenever there is a CPU page fault or GUP on a
- * virtual address. This means that the device driver must migrate the
- * page back to regular memory (CPU accessible).
- *
- * The device driver is free to migrate more than one page from the
- * fault() callback as an optimization. However if the device decides
- * to migrate more than one page it must always priotirize the faulting
- * address over the others.
- *
- * The struct page pointer is only given as a hint to allow quick
- * lookup of internal device driver data. A concurrent migration
- * might have already freed that page and the virtual address might
- * no longer be backed by it. So it should not be modified by the
- * callback.
- *
- * Note that mmap semaphore is held in read mode at least when this
- * callback occurs, hence the vma is valid upon callback entry.
- */
- vm_fault_t (*fault)(struct hmm_devmem *devmem,
- struct vm_area_struct *vma,
- unsigned long addr,
- const struct page *page,
- unsigned int flags,
- pmd_t *pmdp);
-};
-
-/*
- * struct hmm_devmem - track device memory
- *
- * @completion: completion object for device memory
- * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
- * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
- * @resource: IO resource reserved for this chunk of memory
- * @pagemap: device page map for that chunk
- * @device: device to bind resource to
- * @ops: memory operations callback
- * @ref: per CPU refcount
- * @page_fault: callback when CPU fault on an unaddressable device page
- *
- * This is a helper structure for device drivers that do not wish to implement
- * the gory details related to hotplugging new memoy and allocating struct
- * pages.
- *
- * Device drivers can directly use ZONE_DEVICE memory on their own if they
- * wish to do so.
- *
- * The page_fault() callback must migrate page back, from device memory to
- * system memory, so that the CPU can access it. This might fail for various
- * reasons (device issues, device have been unplugged, ...). When such error
- * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and
- * set the CPU page table entry to "poisoned".
- *
- * Note that because memory cgroup charges are transferred to the device memory,
- * this should never fail due to memory restrictions. However, allocation
- * of a regular system page might still fail because we are out of memory. If
- * that happens, the page_fault() callback must return VM_FAULT_OOM.
- *
- * The page_fault() callback can also try to migrate back multiple pages in one
- * chunk, as an optimization. It must, however, prioritize the faulting address
- * over all the others.
- */
-typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
- unsigned long addr,
- const struct page *page,
- unsigned int flags,
- pmd_t *pmdp);
-
-struct hmm_devmem {
- struct completion completion;
- unsigned long pfn_first;
- unsigned long pfn_last;
- struct resource *resource;
- struct device *device;
- struct dev_pagemap pagemap;
- const struct hmm_devmem_ops *ops;
- struct percpu_ref ref;
- dev_page_fault_t page_fault;
-};
-
-/*
- * To add (hotplug) device memory, HMM assumes that there is no real resource
- * that reserves a range in the physical address space (this is intended to be
- * use by unaddressable device memory). It will reserve a physical range big
- * enough and allocate struct page for it.
- *
- * The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct.
- */
-struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
- struct device *device,
- unsigned long size);
-struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
- struct device *device,
- struct resource *res);
-
-/*
- * hmm_devmem_page_set_drvdata - set per-page driver data field
- *
- * @page: pointer to struct page
- * @data: driver data value to set
- *
- * Because page can not be on lru we have an unsigned long that driver can use
- * to store a per page field. This just a simple helper to do that.
- */
-static inline void hmm_devmem_page_set_drvdata(struct page *page,
- unsigned long data)
-{
- page->hmm_data = data;
-}
-
-/*
- * hmm_devmem_page_get_drvdata - get per page driver data field
- *
- * @page: pointer to struct page
- * Return: driver data value
- */
-static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
-{
- return page->hmm_data;
-}
-
-
-/*
- * struct hmm_device - fake device to hang device memory onto
- *
- * @device: device struct
- * @minor: device minor number
- */
-struct hmm_device {
- struct device device;
- unsigned int minor;
-};
-
-/*
- * A device driver that wants to handle multiple devices memory through a
- * single fake device can use hmm_device to do so. This is purely a helper and
- * it is not strictly needed, in order to make use of any HMM functionality.
- */
-struct hmm_device *hmm_device_new(void *drvdata);
-void hmm_device_put(struct hmm_device *hmm_device);
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-#else /* IS_ENABLED(CONFIG_HMM) */
-static inline void hmm_mm_destroy(struct mm_struct *mm) {}
-static inline void hmm_mm_init(struct mm_struct *mm) {}
-#endif /* IS_ENABLED(CONFIG_HMM) */
-
#endif /* LINUX_HMM_H */
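The block deleted above also documented the fault-callback contract: migrate the data back to a CPU-accessible page, return VM_FAULT_SIGBUS (and poison the entry) when the device can no longer provide it, and VM_FAULT_OOM when no system page can be allocated. That contract now lives in the new migrate_to_ram() method. A sketch of a conforming callback, with the actual migration hidden behind a hypothetical driver helper:

#include <linux/memremap.h>
#include <linux/mm.h>

/*
 * Hypothetical driver helper: migrates the device-private page backing
 * vmf->page back to system RAM (typically via the migrate_vma API).
 * Returns 0 on success, -ENOMEM if no system page could be allocated,
 * or another negative errno if the device can no longer supply the data.
 */
int mydrv_migrate_range_to_ram(struct vm_fault *vmf);

static vm_fault_t mydrv_migrate_to_ram(struct vm_fault *vmf)
{
	int ret = mydrv_migrate_range_to_ram(vmf);

	if (ret == -ENOMEM)
		return VM_FAULT_OOM;	/* system RAM exhausted */
	if (ret)
		return VM_FAULT_SIGBUS;	/* device gone: poison the entry */
	return 0;
}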
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index da0ebaec25f0..a02b290ca08a 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -132,7 +132,6 @@ enum {
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
- IORES_DESC_DEVICE_PUBLIC_MEMORY = 7,
};
/* helpers to define resources */
@@ -286,6 +285,8 @@ static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
return (r1->start <= r2->end && r1->end >= r2->start);
}
+struct resource *devm_request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
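devm_request_free_mem_region() replaces the region search that hmm_devmem_add() used to open-code: device-private pages need physical addresses, but the memory is not CPU addressable, so a range that no real resource claims is carved out instead. A sketch of the expected call pattern, assuming &iomem_resource as the search base (the mydrv_* wrapper is hypothetical):

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>

static int mydrv_reserve_devmem(struct device *dev, unsigned long size,
				struct dev_pagemap *pgmap)
{
	struct resource *res;

	/* carve out a physical range that no real resource claims */
	res = devm_request_free_mem_region(dev, &iomem_resource, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	pgmap->res = *res;	/* feed it to devm_memremap_pages() */
	return 0;
}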
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 1732dea030b2..f8a5b2a19945 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -37,13 +37,6 @@ struct vmem_altmap {
* A more complete discussion of unaddressable memory may be found in
* include/linux/hmm.h and Documentation/vm/hmm.rst.
*
- * MEMORY_DEVICE_PUBLIC:
- * Device memory that is cache coherent from device and CPU point of view. This
- * is use on platform that have an advance system bus (like CAPI or CCIX). A
- * driver can hotplug the device memory using ZONE_DEVICE and with that memory
- * type. Any page of a process can be migrated to such memory. However no one
- * should be allow to pin such memory so that it can always be evicted.
- *
* MEMORY_DEVICE_FS_DAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
* coherent and supports page pinning. In support of coordinating page
@@ -52,54 +45,84 @@ struct vmem_altmap {
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
+ * MEMORY_DEVICE_DEVDAX:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. In contrast to
+ * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
+ * character device.
+ *
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
* transactions.
*/
enum memory_type {
+ /* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
- MEMORY_DEVICE_PUBLIC,
MEMORY_DEVICE_FS_DAX,
+ MEMORY_DEVICE_DEVDAX,
MEMORY_DEVICE_PCI_P2PDMA,
};
-/*
- * Additional notes about MEMORY_DEVICE_PRIVATE may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief
- * explanation in include/linux/memory_hotplug.h.
- *
- * The page_free() callback is called once the page refcount reaches 1
- * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug.
- * This allows the device driver to implement its own memory management.)
- */
-typedef void (*dev_page_free_t)(struct page *page, void *data);
+struct dev_pagemap_ops {
+ /*
+ * Called once the page refcount reaches 1. (ZONE_DEVICE pages never
+ * reach 0 refcount unless there is a refcount bug. This allows the
+ * device driver to implement its own memory management.)
+ */
+ void (*page_free)(struct page *page);
+
+ /*
+ * Transition the refcount in struct dev_pagemap to the dead state.
+ */
+ void (*kill)(struct dev_pagemap *pgmap);
+
+ /*
+ * Wait for refcount in struct dev_pagemap to be idle and reap it.
+ */
+ void (*cleanup)(struct dev_pagemap *pgmap);
+
+ /*
+ * Used for private (un-addressable) device memory only. Must migrate
+ * the page back to a CPU accessible page.
+ */
+ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+};
+
+#define PGMAP_ALTMAP_VALID (1 << 0)
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
- * @page_free: free page callback when page refcount reaches 1
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @kill: callback to transition @ref to the dead state
- * @cleanup: callback to wait for @ref to be idle and reap it
+ * @internal_ref: internal reference if @ref is not provided by the caller
+ * @done: completion for @internal_ref
* @dev: host device of the mapping for debug
* @data: private data pointer for page_free()
* @type: memory type: see MEMORY_* in memory_hotplug.h
+ * @flags: PGMAP_* flags to specify detailed behavior
+ * @ops: method table
*/
struct dev_pagemap {
- dev_page_free_t page_free;
struct vmem_altmap altmap;
- bool altmap_valid;
struct resource res;
struct percpu_ref *ref;
- void (*kill)(struct percpu_ref *ref);
- void (*cleanup)(struct percpu_ref *ref);
+ struct percpu_ref internal_ref;
+ struct completion done;
struct device *dev;
- void *data;
enum memory_type type;
+ unsigned int flags;
u64 pci_p2pdma_bus_offset;
+ const struct dev_pagemap_ops *ops;
};
+static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
+{
+ if (pgmap->flags & PGMAP_ALTMAP_VALID)
+ return &pgmap->altmap;
+ return NULL;
+}
+
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
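The altmap_valid bool is gone: validity is now a flag bit, and consumers call pgmap_altmap(), which returns NULL when no altmap was set up. A sketch of both sides of that contract (the mydrv_* names are hypothetical; the memcpy is needed because base_pfn and reserve are const members of struct vmem_altmap):

#include <linux/memremap.h>
#include <linux/string.h>

/* Producer: reserve the head of the range for the memmap itself. */
static void mydrv_setup_altmap(struct dev_pagemap *pgmap,
			       unsigned long base_pfn,
			       unsigned long reserved_pfns)
{
	struct vmem_altmap altmap = {
		.base_pfn = base_pfn,
		.reserve  = reserved_pfns,
	};

	memcpy(&pgmap->altmap, &altmap, sizeof(altmap));
	pgmap->flags |= PGMAP_ALTMAP_VALID;
}

/* Consumer: core code no longer checks a separate bool. */
static unsigned long mydrv_memmap_reserve(struct dev_pagemap *pgmap)
{
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);

	return altmap ? altmap->reserve : 0;
}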
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd0b5f4e1e45..2425f4167ec2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -932,8 +932,6 @@ static inline bool is_zone_device_page(const struct page *page)
#endif
#ifdef CONFIG_DEV_PAGEMAP_OPS
-void dev_pagemap_get_ops(void);
-void dev_pagemap_put_ops(void);
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
@@ -944,7 +942,6 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
- case MEMORY_DEVICE_PUBLIC:
case MEMORY_DEVICE_FS_DAX:
__put_devmap_managed_page(page);
return true;
@@ -960,12 +957,6 @@ static inline bool is_device_private_page(const struct page *page)
page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
-static inline bool is_device_public_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PUBLIC;
-}
-
#ifdef CONFIG_PCI_P2PDMA
static inline bool is_pci_p2pdma_page(const struct page *page)
{
@@ -980,14 +971,6 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
#endif /* CONFIG_PCI_P2PDMA */
#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline void dev_pagemap_get_ops(void)
-{
-}
-
-static inline void dev_pagemap_put_ops(void)
-{
-}
-
static inline bool put_devmap_managed_page(struct page *page)
{
return false;
@@ -998,11 +981,6 @@ static inline bool is_device_private_page(const struct page *page)
return false;
}
-static inline bool is_device_public_page(const struct page *page)
-{
- return false;
-}
-
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return false;
@@ -1431,10 +1409,8 @@ struct zap_details {
pgoff_t last_index; /* Highest page->index to unmap */
};
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte, bool with_public_device);
-#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
-
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
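put_devmap_managed_page() is the hook that diverts the final reference drop of a device page away from the page allocator, and with DEVICE_PUBLIC gone only private and fsdax pages take that detour. For context, a rough paraphrase of how put_page() consults it in this kernel generation (a sketch, not part of this diff):

static inline void sketch_put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * Device private/fsdax pages never go back to the allocator;
	 * their final put lands in __put_devmap_managed_page() and from
	 * there in the driver's ->page_free() callback.
	 */
	if (put_devmap_managed_page(page))
		return;

	if (put_page_testzero(page))
		__put_page(page);
}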
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8ec38b11b361..8d37182f8dbe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -158,7 +158,7 @@ struct page {
struct { /* ZONE_DEVICE pages */
/** @pgmap: Points to the hosting device page map. */
struct dev_pagemap *pgmap;
- unsigned long hmm_data;
+ void *zone_device_data;
unsigned long _zd_pad_1; /* uses mapping */
};
@@ -501,7 +501,7 @@ struct mm_struct {
#endif
struct work_struct async_put_work;
-#if IS_ENABLED(CONFIG_HMM)
+#ifdef CONFIG_HMM_MIRROR
/* HMM needs to track a few things per mm */
struct hmm *hmm;
#endif
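With hmm_data renamed to the neutral zone_device_data, the hmm_devmem_page_{set,get}_drvdata() wrappers deleted from hmm.h above lose their reason to exist: any ZONE_DEVICE driver can use the field directly, since such pages are never on an LRU list. A hypothetical sketch:

#include <linux/mm_types.h>

struct mydrv_page_info {
	u64 device_offset;	/* whatever the driver needs per page */
};

static void mydrv_set_page_info(struct page *page,
				struct mydrv_page_info *info)
{
	page->zone_device_data = info;
}

static struct mydrv_page_info *mydrv_get_page_info(struct page *page)
{
	return page->zone_device_data;
}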
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 4d961668e5fc..15bdb6fe71e5 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -129,12 +129,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
return pfn_to_page(swp_offset(entry));
}
-
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
- unsigned long addr,
- swp_entry_t entry,
- unsigned int flags,
- pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
@@ -164,15 +158,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
return NULL;
}
-
-static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
- unsigned long addr,
- swp_entry_t entry,
- unsigned int flags,
- pmd_t *pmdp)
-{
- return VM_FAULT_SIGBUS;
-}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
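Removing device_private_entry_fault() works because the fault path no longer needs a special hook: the swap entry resolves to a page, and that page's pgmap carries the method table. A sketch of the resulting dispatch, roughly what do_swap_page() does after this series (locking and error handling omitted):

#include <linux/memremap.h>
#include <linux/swapops.h>

static vm_fault_t sketch_device_private_fault(struct vm_fault *vmf,
					      swp_entry_t entry)
{
	/*
	 * The driver's migrate_to_ram() must bring the data back into a
	 * CPU-accessible page, or fail with VM_FAULT_SIGBUS/VM_FAULT_OOM.
	 */
	vmf->page = device_private_entry_to_page(entry);
	return vmf->page->pgmap->ops->migrate_to_ram(vmf);
}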