path: root/mm/memory.c
author    Christoph Hellwig <hch@lst.de>  2019-06-26 14:27:11 +0200
committer Jason Gunthorpe <jgg@mellanox.com>  2019-07-02 14:32:44 -0300
commit    897e6365cda6ba6356e83a3aaa68dec82ef4c548 (patch)
tree      91a2776f8a994aa78d9eb1f91fce9debc51b1727 /mm/memory.c
parent    memremap: lift the devmap_enable manipulation into devm_memremap_pages (diff)
memremap: add a migrate_to_ram method to struct dev_pagemap_ops
This replaces the hacky ->fault callback, which is currently directly called from common code through a hmm specific data structure as an exercise in layering violations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
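For context, the hook the commit title refers to has roughly the shape sketched below. This is an assumption pieced together from the commit message and the call site in the diff further down, not a verbatim copy of include/linux/memremap.h.

```c
/*
 * Sketch (assumed layout) of the ops structure this commit extends.
 * Other callbacks that struct dev_pagemap_ops carries are elided.
 */
#include <linux/mm.h>	/* struct vm_fault, vm_fault_t */

struct dev_pagemap_ops {
	/* ... other callbacks elided ... */

	/*
	 * Called from do_swap_page() when the CPU touches un-addressable
	 * device-private memory; the driver must migrate the page back to
	 * a CPU-accessible page and return a vm_fault_t result.
	 */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};
```

The point of the change is that common mm code now reaches the driver through the page's own pgmap ops rather than through an hmm-specific fault callback.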
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2d14f4c7e152..d437ccdb210c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2748,13 +2748,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
 		} else if (is_device_private_entry(entry)) {
-			/*
-			 * For un-addressable device memory we call the pgmap
-			 * fault handler callback. The callback must migrate
-			 * the page back to some CPU accessible page.
-			 */
-			ret = device_private_entry_fault(vma, vmf->address, entry,
-					 vmf->flags, vmf->pmd);
+			vmf->page = device_private_entry_to_page(entry);
+			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
 		} else if (is_hwpoison_entry(entry)) {
 			ret = VM_FAULT_HWPOISON;
 		} else {
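As a usage illustration (not part of this commit), a driver that exposes device-private memory would wire up the new callback roughly as below. The names my_devmem_migrate_back(), my_devmem_migrate_to_ram() and my_devmem_pagemap_ops are hypothetical; the real migration work would typically go through the migrate_vma helpers.

```c
#include <linux/memremap.h>	/* struct dev_pagemap_ops */
#include <linux/mm.h>		/* struct vm_fault, vm_fault_t */

/* hypothetical driver helper that copies the data back to system RAM */
static int my_devmem_migrate_back(struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long addr);

static vm_fault_t my_devmem_migrate_to_ram(struct vm_fault *vmf)
{
	/* vmf->page was filled in by do_swap_page() before the call */
	if (my_devmem_migrate_back(vmf->page, vmf->vma, vmf->address))
		return VM_FAULT_SIGBUS;	/* migration failed, fail the fault */

	return 0;
}

static const struct dev_pagemap_ops my_devmem_pagemap_ops = {
	.migrate_to_ram	= my_devmem_migrate_to_ram,
	/* other callbacks omitted */
};
```

With this shape, do_swap_page() only needs the generic pgmap->ops pointer, which is what lets the hmm-specific fault plumbing removed by this patch go away.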