Diffstat (limited to 'drivers/xen')
-rw-r--r--   drivers/xen/Kconfig                        63
-rw-r--r--   drivers/xen/balloon.c                      25
-rw-r--r--   drivers/xen/efi.c                          84
-rw-r--r--   drivers/xen/gntdev-common.h                 8
-rw-r--r--   drivers/xen/gntdev.c                      192
-rw-r--r--   drivers/xen/grant-table.c                   3
-rw-r--r--   drivers/xen/mcelog.c                       14
-rw-r--r--   drivers/xen/platform-pci.c                 14
-rw-r--r--   drivers/xen/pvcalls-back.c                  2
-rw-r--r--   drivers/xen/swiotlb-xen.c                  12
-rw-r--r--   drivers/xen/xenbus/xenbus_dev_frontend.c   20
11 files changed, 184 insertions, 253 deletions
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 79cc75096f42..61212fc7f0c7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -106,27 +106,27 @@ config XENFS
           If in doubt, say yes.
 
 config XEN_COMPAT_XENFS
-       bool "Create compatibility mount point /proc/xen"
-       depends on XENFS
-       default y
-       help
-         The old xenstore userspace tools expect to find "xenbus"
-         under /proc/xen, but "xenbus" is now found at the root of the
-         xenfs filesystem.  Selecting this causes the kernel to create
-         the compatibility mount point /proc/xen if it is running on
-         a xen platform.
-         If in doubt, say yes.
+        bool "Create compatibility mount point /proc/xen"
+        depends on XENFS
+        default y
+        help
+          The old xenstore userspace tools expect to find "xenbus"
+          under /proc/xen, but "xenbus" is now found at the root of the
+          xenfs filesystem.  Selecting this causes the kernel to create
+          the compatibility mount point /proc/xen if it is running on
+          a xen platform.
+          If in doubt, say yes.
 
 config XEN_SYS_HYPERVISOR
-       bool "Create xen entries under /sys/hypervisor"
-       depends on SYSFS
-       select SYS_HYPERVISOR
-       default y
-       help
-         Create entries under /sys/hypervisor describing the Xen
-         hypervisor environment.  When running native or in another
-         virtual environment, /sys/hypervisor will still be present,
-         but will have no xen contents.
+        bool "Create xen entries under /sys/hypervisor"
+        depends on SYSFS
+        select SYS_HYPERVISOR
+        default y
+        help
+          Create entries under /sys/hypervisor describing the Xen
+          hypervisor environment.  When running native or in another
+          virtual environment, /sys/hypervisor will still be present,
+          but will have no xen contents.
 
 config XEN_XENBUS_FRONTEND
         tristate
@@ -141,7 +141,8 @@ config XEN_GNTDEV
 
 config XEN_GNTDEV_DMABUF
         bool "Add support for dma-buf grant access device driver extension"
-        depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
+        depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
+        select DMA_SHARED_BUFFER
         help
           Allows userspace processes and kernel modules to use Xen backed
           dma-buf implementation. With this extension grant references to
@@ -270,7 +271,7 @@ config XEN_ACPI_PROCESSOR
         depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
         default m
         help
-          This ACPI processor uploads Power Management information to the Xen
+          This ACPI processor uploads Power Management information to the Xen
           hypervisor.
 
           To do that the driver parses the Power Management data and uploads
@@ -279,19 +280,19 @@ config XEN_ACPI_PROCESSOR
           SMM so that other drivers (such as ACPI cpufreq scaling driver) will
           not load.
 
-          To compile this driver as a module, choose M here: the module will be
+          To compile this driver as a module, choose M here: the module will be
           called xen_acpi_processor  If you do not know what to choose, select
           M here. If the CPUFREQ drivers are built in, select Y here.
 
 config XEN_MCE_LOG
         bool "Xen platform mcelog"
-        depends on XEN_DOM0 && X86_64 && X86_MCE
+        depends on XEN_DOM0 && X86_MCE
         help
           Allow kernel fetching MCE error from Xen platform and
           converting it into Linux mcelog format for mcelog tools
 
 config XEN_HAVE_PVMMU
-       bool
+        bool
 
 config XEN_EFI
         def_bool y
@@ -308,15 +309,15 @@ config XEN_ACPI
         depends on X86 && ACPI
 
 config XEN_SYMS
-       bool "Xen symbols"
-       depends on X86 && XEN_DOM0 && XENFS
-       default y if KALLSYMS
-       help
-          Exports hypervisor symbols (along with their types and addresses) via
-          /proc/xen/xensyms file, similar to /proc/kallsyms
+        bool "Xen symbols"
+        depends on X86 && XEN_DOM0 && XENFS
+        default y if KALLSYMS
+        help
+          Exports hypervisor symbols (along with their types and addresses) via
+          /proc/xen/xensyms file, similar to /proc/kallsyms
 
 config XEN_HAVE_VPMU
-       bool
+        bool
 
 config XEN_FRONT_PGDIR_SHBUF
         tristate
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 4e11de6cde81..4f2e78a5e4db 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -156,8 +156,10 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
         (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /* balloon_append: add the given page to the balloon. */
-static void __balloon_append(struct page *page)
+static void balloon_append(struct page *page)
 {
+        __SetPageOffline(page);
+
         /* Lowmem is re-populated first, so highmem pages go at list tail. */
         if (PageHighMem(page)) {
                 list_add_tail(&page->lru, &ballooned_pages);
@@ -169,11 +171,6 @@ static void __balloon_append(struct page *page)
         wake_up(&balloon_wq);
 }
 
-static void balloon_append(struct page *page)
-{
-        __balloon_append(page);
-}
-
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
 static struct page *balloon_retrieve(bool require_lowmem)
 {
@@ -192,6 +189,7 @@ static struct page *balloon_retrieve(bool require_lowmem)
         else
                 balloon_stats.balloon_low--;
 
+        __ClearPageOffline(page);
         return page;
 }
 
@@ -376,9 +374,7 @@ static void xen_online_page(struct page *page, unsigned int order)
         mutex_lock(&balloon_mutex);
         for (i = 0; i < size; i++) {
                 p = pfn_to_page(start_pfn + i);
-                __online_page_set_limits(p);
-                __SetPageOffline(p);
-                __balloon_append(p);
+                balloon_append(p);
         }
         mutex_unlock(&balloon_mutex);
 }
@@ -444,7 +440,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                 xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
 
                 /* Relinquish the page back to the allocator. */
-                __ClearPageOffline(page);
                 free_reserved_page(page);
         }
 
@@ -471,7 +466,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                         state = BP_EAGAIN;
                         break;
                 }
-                __SetPageOffline(page);
                 adjust_managed_page_count(page, -1);
                 xenmem_reservation_scrub_page(page);
                 list_add(&page->lru, &pages);
@@ -611,7 +605,6 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
         while (pgno < nr_pages) {
                 page = balloon_retrieve(true);
                 if (page) {
-                        __ClearPageOffline(page);
                         pages[pgno++] = page;
 #ifdef CONFIG_XEN_HAVE_PVMMU
                         /*
@@ -653,10 +646,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
         mutex_lock(&balloon_mutex);
 
         for (i = 0; i < nr_pages; i++) {
-                if (pages[i]) {
-                        __SetPageOffline(pages[i]);
+                if (pages[i])
                         balloon_append(pages[i]);
-                }
         }
 
         balloon_stats.target_unpopulated -= nr_pages;
@@ -674,7 +665,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
                                       unsigned long pages)
 {
         unsigned long pfn, extra_pfn_end;
-        struct page *page;
 
         /*
          * If the amount of usable memory has been limited (e.g., with
@@ -684,11 +674,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
         extra_pfn_end = min(max_pfn, start_pfn + pages);
 
         for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-                page = pfn_to_page(pfn);
                 /* totalram_pages and totalhigh_pages do not
                    include the boot-time balloon extension, so
                    don't subtract from it. */
-                __balloon_append(page);
+                balloon_append(pfn_to_page(pfn));
         }
 
         balloon_stats.total_pages += extra_pfn_end - start_pfn;
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index 89d60f8e3c18..d1ff2186ebb4 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -40,7 +40,7 @@
 
 #define efi_data(op)        (op.u.efi_runtime_call)
 
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
         struct xen_platform_op op = INIT_EFI_OP(get_time);
 
@@ -61,9 +61,8 @@ efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_time);
 
-efi_status_t xen_efi_set_time(efi_time_t *tm)
+static efi_status_t xen_efi_set_time(efi_time_t *tm)
 {
         struct xen_platform_op op = INIT_EFI_OP(set_time);
 
@@ -75,10 +74,10 @@ efi_status_t xen_efi_set_time(efi_time_t *tm)
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_time);
 
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
-                                     efi_time_t *tm)
+static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
+                                            efi_bool_t *pending,
+                                            efi_time_t *tm)
 {
         struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
 
@@ -98,9 +97,8 @@ efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
 
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
         struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
 
@@ -117,11 +115,10 @@ efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
 
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                  u32 *attr, unsigned long *data_size,
-                                  void *data)
+static efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+                                         u32 *attr, unsigned long *data_size,
+                                         void *data)
 {
         struct xen_platform_op op = INIT_EFI_OP(get_variable);
 
@@ -141,11 +138,10 @@ efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_variable);
 
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
-                                       efi_char16_t *name,
-                                       efi_guid_t *vendor)
+static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
+                                              efi_char16_t *name,
+                                              efi_guid_t *vendor)
 {
         struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
 
@@ -165,11 +161,10 @@ efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
 
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                  u32 attr, unsigned long data_size,
-                                  void *data)
+static efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+                                         u32 attr, unsigned long data_size,
+                                         void *data)
 {
         struct xen_platform_op op = INIT_EFI_OP(set_variable);
 
@@ -186,11 +181,10 @@ efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_variable);
 
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
-                                         u64 *remaining_space,
-                                         u64 *max_variable_size)
+static efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
+                                                u64 *remaining_space,
+                                                u64 *max_variable_size)
 {
         struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
 
@@ -208,9 +202,8 @@ efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
 
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
+static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 {
         struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
 
@@ -221,10 +214,9 @@ efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
 
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
-                                    unsigned long count, unsigned long sg_list)
+static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
+                                           unsigned long count, unsigned long sg_list)
 {
         struct xen_platform_op op = INIT_EFI_OP(update_capsule);
 
@@ -241,11 +233,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
 
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
-                                        unsigned long count, u64 *max_size,
-                                        int *reset_type)
+static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+                                               unsigned long count, u64 *max_size, int *reset_type)
 {
         struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
 
@@ -264,10 +254,9 @@ efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
         return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
 
-void xen_efi_reset_system(int reset_type, efi_status_t status,
-                          unsigned long data_size, efi_char16_t *data)
+static void xen_efi_reset_system(int reset_type, efi_status_t status,
+                                 unsigned long data_size, efi_char16_t *data)
 {
         switch (reset_type) {
         case EFI_RESET_COLD:
@@ -281,4 +270,25 @@ void xen_efi_reset_system(int reset_type, efi_status_t status,
                 BUG();
         }
 }
-EXPORT_SYMBOL_GPL(xen_efi_reset_system);
+
+/*
+ * Set XEN EFI runtime services function pointers. Other fields of struct efi,
+ * e.g. efi.systab, will be set like normal EFI.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+        efi.get_time = xen_efi_get_time;
+        efi.set_time = xen_efi_set_time;
+        efi.get_wakeup_time = xen_efi_get_wakeup_time;
+        efi.set_wakeup_time = xen_efi_set_wakeup_time;
+        efi.get_variable = xen_efi_get_variable;
+        efi.get_next_variable = xen_efi_get_next_variable;
+        efi.set_variable = xen_efi_set_variable;
+        efi.set_variable_nonblocking = xen_efi_set_variable;
+        efi.query_variable_info = xen_efi_query_variable_info;
+        efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+        efi.update_capsule = xen_efi_update_capsule;
+        efi.query_capsule_caps = xen_efi_query_capsule_caps;
+        efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+        efi.reset_system = xen_efi_reset_system;
+}
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 2f8b949c3eeb..91e44c04f787 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -21,15 +21,8 @@ struct gntdev_dmabuf_priv;
 struct gntdev_priv {
         /* Maps with visible offsets in the file descriptor. */
         struct list_head maps;
-        /*
-         * Maps that are not visible; will be freed on munmap.
-         * Only populated if populate_freeable_maps == 1
-         */
-        struct list_head freeable_maps;
         /* lock protects maps and freeable_maps. */
         struct mutex lock;
-        struct mm_struct *mm;
-        struct mmu_notifier mn;
 
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
         /* Device for which DMA memory is allocated. */
@@ -49,6 +42,7 @@ struct gntdev_unmap_notify {
 };
 
 struct gntdev_grant_map {
+        struct mmu_interval_notifier notifier;
         struct list_head next;
         struct vm_area_struct *vma;
         int index;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a446a7221e13..a04ddf2a68af 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -22,6 +22,7 @@
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -34,9 +35,6 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/refcount.h>
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-#include <linux/of_device.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -65,7 +63,6 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
-#define populate_freeable_maps use_ptemod
 
 static int unmap_grant_pages(struct gntdev_grant_map *map,
                              int offset, int pages);
@@ -251,12 +248,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
                 evtchn_put(map->notify.event);
         }
 
-        if (populate_freeable_maps && priv) {
-                mutex_lock(&priv->lock);
-                list_del(&map->next);
-                mutex_unlock(&priv->lock);
-        }
-
         if (map->pages && !use_ptemod)
                 unmap_grant_pages(map, 0, map->count);
         gntdev_free_map(map);
@@ -446,16 +437,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 
         pr_debug("gntdev_vma_close %p\n", vma);
         if (use_ptemod) {
-                /* It is possible that an mmu notifier could be running
-                 * concurrently, so take priv->lock to ensure that the vma won't
-                 * vanishing during the unmap_grant_pages call, since we will
-                 * spin here until that completes. Such a concurrent call will
-                 * not do any unmapping, since that has been done prior to
-                 * closing the vma, but it may still iterate the unmap_ops list.
-                 */
-                mutex_lock(&priv->lock);
+                WARN_ON(map->vma != vma);
+                mmu_interval_notifier_remove(&map->notifier);
                 map->vma = NULL;
-                mutex_unlock(&priv->lock);
         }
         vma->vm_private_data = NULL;
         gntdev_put_map(priv, map);
@@ -477,109 +461,44 @@ static const struct vm_operations_struct gntdev_vmops = {
 
 /* ------------------------------------------------------------------ */
 
-static bool in_range(struct gntdev_grant_map *map,
-                     unsigned long start, unsigned long end)
-{
-        if (!map->vma)
-                return false;
-        if (map->vma->vm_start >= end)
-                return false;
-        if (map->vma->vm_end <= start)
-                return false;
-
-        return true;
-}
-
-static int unmap_if_in_range(struct gntdev_grant_map *map,
-                             unsigned long start, unsigned long end,
-                             bool blockable)
+static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
+                              const struct mmu_notifier_range *range,
+                              unsigned long cur_seq)
 {
+        struct gntdev_grant_map *map =
+                container_of(mn, struct gntdev_grant_map, notifier);
         unsigned long mstart, mend;
         int err;
 
-        if (!in_range(map, start, end))
-                return 0;
+        if (!mmu_notifier_range_blockable(range))
+                return false;
 
-        if (!blockable)
-                return -EAGAIN;
+        /*
+         * If the VMA is split or otherwise changed the notifier is not
+         * updated, but we don't want to process VA's outside the modified
+         * VMA. FIXME: It would be much more understandable to just prevent
+         * modifying the VMA in the first place.
+         */
+        if (map->vma->vm_start >= range->end ||
+            map->vma->vm_end <= range->start)
+                return true;
 
-        mstart = max(start, map->vma->vm_start);
-        mend = min(end, map->vma->vm_end);
+        mstart = max(range->start, map->vma->vm_start);
+        mend = min(range->end, map->vma->vm_end);
         pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
                  map->index, map->count,
                  map->vma->vm_start, map->vma->vm_end,
-                 start, end, mstart, mend);
+                 range->start, range->end, mstart, mend);
         err = unmap_grant_pages(map,
                                 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                 (mend - mstart) >> PAGE_SHIFT);
         WARN_ON(err);
 
-        return 0;
-}
-
-static int mn_invl_range_start(struct mmu_notifier *mn,
-                               const struct mmu_notifier_range *range)
-{
-        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
-        struct gntdev_grant_map *map;
-        int ret = 0;
-
-        if (mmu_notifier_range_blockable(range))
-                mutex_lock(&priv->lock);
-        else if (!mutex_trylock(&priv->lock))
-                return -EAGAIN;
-
-        list_for_each_entry(map, &priv->maps, next) {
-                ret = unmap_if_in_range(map, range->start, range->end,
-                                        mmu_notifier_range_blockable(range));
-                if (ret)
-                        goto out_unlock;
-        }
-        list_for_each_entry(map, &priv->freeable_maps, next) {
-                ret = unmap_if_in_range(map, range->start, range->end,
-                                        mmu_notifier_range_blockable(range));
-                if (ret)
-                        goto out_unlock;
-        }
-
-out_unlock:
-        mutex_unlock(&priv->lock);
-
-        return ret;
-}
-
-static void mn_release(struct mmu_notifier *mn,
-                       struct mm_struct *mm)
-{
-        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
-        struct gntdev_grant_map *map;
-        int err;
-
-        mutex_lock(&priv->lock);
-        list_for_each_entry(map, &priv->maps, next) {
-                if (!map->vma)
-                        continue;
-                pr_debug("map %d+%d (%lx %lx)\n",
-                         map->index, map->count,
-                         map->vma->vm_start, map->vma->vm_end);
-                err = unmap_grant_pages(map, /* offset */ 0, map->count);
-                WARN_ON(err);
-        }
-        list_for_each_entry(map, &priv->freeable_maps, next) {
-                if (!map->vma)
-                        continue;
-                pr_debug("map %d+%d (%lx %lx)\n",
-                         map->index, map->count,
-                         map->vma->vm_start, map->vma->vm_end);
-                err = unmap_grant_pages(map, /* offset */ 0, map->count);
-                WARN_ON(err);
-        }
-        mutex_unlock(&priv->lock);
+        return true;
 }
 
-static const struct mmu_notifier_ops gntdev_mmu_ops = {
-        .release                = mn_release,
-        .invalidate_range_start = mn_invl_range_start,
+static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
+        .invalidate = gntdev_invalidate,
 };
 
 /* ------------------------------------------------------------------ */
 
@@ -594,7 +513,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
                 return -ENOMEM;
 
         INIT_LIST_HEAD(&priv->maps);
-        INIT_LIST_HEAD(&priv->freeable_maps);
         mutex_init(&priv->lock);
 
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
@@ -606,17 +524,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
         }
 #endif
 
-        if (use_ptemod) {
-                priv->mm = get_task_mm(current);
-                if (!priv->mm) {
-                        kfree(priv);
-                        return -ENOMEM;
-                }
-                priv->mn.ops = &gntdev_mmu_ops;
-                ret = mmu_notifier_register(&priv->mn, priv->mm);
-                mmput(priv->mm);
-        }
-
         if (ret) {
                 kfree(priv);
                 return ret;
@@ -625,14 +532,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
         flip->private_data = priv;
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
         priv->dma_dev = gntdev_miscdev.this_device;
-
-        /*
-         * The device is not spawn from a device tree, so arch_setup_dma_ops
-         * is not called, thus leaving the device with dummy DMA ops.
-         * Fix this by calling of_dma_configure() with a NULL node to set
-         * default DMA ops.
-         */
-        of_dma_configure(priv->dma_dev, NULL, true);
+        dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
 #endif
         pr_debug("priv %p\n", priv);
 
@@ -653,16 +553,12 @@ static int gntdev_release(struct inode *inode, struct file *flip)
                 list_del(&map->next);
                 gntdev_put_map(NULL /* already removed */, map);
         }
-        WARN_ON(!list_empty(&priv->freeable_maps));
         mutex_unlock(&priv->lock);
 
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
         gntdev_dmabuf_fini(priv->dmabuf_priv);
 #endif
 
-        if (use_ptemod)
-                mmu_notifier_unregister(&priv->mn, priv->mm);
-
         kfree(priv);
         return 0;
 }
@@ -723,8 +619,6 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
         map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
         if (map) {
                 list_del(&map->next);
-                if (populate_freeable_maps)
-                        list_add_tail(&map->next, &priv->freeable_maps);
                 err = 0;
         }
         mutex_unlock(&priv->lock);
@@ -1096,11 +990,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                 goto unlock_out;
         if (use_ptemod && map->vma)
                 goto unlock_out;
-        if (use_ptemod && priv->mm != vma->vm_mm) {
-                pr_warn("Huh? Other mm?\n");
-                goto unlock_out;
-        }
-
         refcount_inc(&map->users);
 
         vma->vm_ops = &gntdev_vmops;
@@ -1111,10 +1000,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
         vma->vm_flags |= VM_DONTCOPY;
 
         vma->vm_private_data = map;
-
-        if (use_ptemod)
-                map->vma = vma;
-
         if (map->flags) {
                 if ((vma->vm_flags & VM_WRITE) &&
                                 (map->flags & GNTMAP_readonly))
@@ -1125,8 +1010,28 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                         map->flags |= GNTMAP_readonly;
         }
 
+        if (use_ptemod) {
+                map->vma = vma;
+                err = mmu_interval_notifier_insert_locked(
+                        &map->notifier, vma->vm_mm, vma->vm_start,
+                        vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
+                if (err)
+                        goto out_unlock_put;
+        }
         mutex_unlock(&priv->lock);
 
+        /*
+         * gntdev takes the address of the PTE in find_grant_ptes() and passes
+         * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
+         * the notifier is to prevent the hypervisor pointer to the PTE from
+         * going stale.
+         *
+         * Since this vma's mappings can't be touched without the mmap_sem,
+         * and we are holding it now, there is no need for the notifier_range
+         * locking pattern.
+         */
+        mmu_interval_read_begin(&map->notifier);
+
         if (use_ptemod) {
                 map->pages_vm_start = vma->vm_start;
                 err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -1175,8 +1080,11 @@ out_unlock_put:
         mutex_unlock(&priv->lock);
 out_put_map:
         if (use_ptemod) {
-                map->vma = NULL;
                 unmap_grant_pages(map, 0, map->count);
+                if (map->vma) {
+                        mmu_interval_notifier_remove(&map->notifier);
+                        map->vma = NULL;
+                }
         }
         gntdev_put_map(priv, map);
         return err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7ea6fb6a2e5d..49b381e104ef 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1363,8 +1363,7 @@ static int gnttab_setup(void)
         if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
                 gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
                 if (gnttab_shared.addr == NULL) {
-                        pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
-                                (unsigned long)xen_auto_xlat_grant_frames.vaddr);
+                        pr_warn("gnttab share frames is not mapped!\n");
                         return -ENOMEM;
                 }
         }
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index b8bf61abb65b..e9ac3b8c4167 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -222,7 +222,7 @@ static int convert_log(struct mc_info *mi)
         struct mcinfo_global *mc_global;
         struct mcinfo_bank *mc_bank;
         struct xen_mce m;
-        uint32_t i;
+        unsigned int i, j;
 
         mic = NULL;
         x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
@@ -248,7 +248,17 @@ static int convert_log(struct mc_info *mi)
         m.socketid = g_physinfo[i].mc_chipid;
         m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
         m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
-        m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;
+        for (j = 0; j < g_physinfo[i].mc_nmsrvals; ++j)
+                switch (g_physinfo[i].mc_msrvalues[j].reg) {
+                case MSR_IA32_MCG_CAP:
+                        m.mcgcap = g_physinfo[i].mc_msrvalues[j].value;
+                        break;
+
+                case MSR_PPIN:
+                case MSR_AMD_PPIN:
+                        m.ppin = g_physinfo[i].mc_msrvalues[j].value;
+                        break;
+                }
 
         mic = NULL;
         x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 5e30602fdbad..59e85e408c23 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -74,7 +74,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
                         "xen-platform-pci", pdev);
 }
 
-static int platform_pci_resume(struct pci_dev *pdev)
+static int platform_pci_resume(struct device *dev)
 {
         int err;
 
@@ -83,7 +83,7 @@ static int platform_pci_resume(struct pci_dev *pdev)
 
         err = xen_set_callback_via(callback_via);
         if (err) {
-                dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+                dev_err(dev, "platform_pci_resume failure!\n");
                 return err;
         }
         return 0;
@@ -168,13 +168,17 @@ static const struct pci_device_id platform_pci_tbl[] = {
         {0,}
 };
 
+static struct dev_pm_ops platform_pm_ops = {
+        .resume_noirq = platform_pci_resume,
+};
+
 static struct pci_driver platform_driver = {
         .name = DRV_NAME,
         .probe = platform_pci_probe,
         .id_table = platform_pci_tbl,
-#ifdef CONFIG_PM
-        .resume_early = platform_pci_resume,
-#endif
+        .driver = {
+                .pm = &platform_pm_ops,
+        },
 };
 
 builtin_pci_driver(platform_driver);
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 69a626b0e594..c57c71b7d53d 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
         mappass->reqcopy = *req;
         icsk = inet_csk(mappass->sock->sk);
         queue = &icsk->icsk_accept_queue;
-        data = queue->rskq_accept_head != NULL;
+        data = READ_ONCE(queue->rskq_accept_head) != NULL;
 
         if (data) {
                 mappass->reqcopy.cmd = 0;
                 ret = 0;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bd3a10dfac15..b6d27762c6f8 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (dma_capable(dev, dev_addr, size) &&
+        if (dma_capable(dev, dev_addr, size, true) &&
             !range_straddles_page_boundary(phys, size) &&
             !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
             swiotlb_force != SWIOTLB_FORCE)
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (unlikely(!dma_capable(dev, dev_addr, size))) {
+        if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
                 swiotlb_tbl_unmap_single(dev, map, size, size, dir,
                                 attrs | DMA_ATTR_SKIP_CPU_SYNC);
                 return DMA_MAPPING_ERROR;
@@ -405,7 +405,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 done:
         if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+                xen_dma_sync_for_device(dev_addr, phys, size, dir);
         return dev_addr;
 }
 
@@ -425,7 +425,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+                xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
 
         /* NOTE: We use dev_addr here, not paddr! */
         if (is_xen_swiotlb_buffer(dev_addr))
@@ -439,7 +439,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
         phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
         if (!dev_is_dma_coherent(dev))
-                xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+                xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
 
         if (is_xen_swiotlb_buffer(dma_addr))
                 swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -455,7 +455,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                 swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
         if (!dev_is_dma_coherent(dev))
-                xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+                xen_dma_sync_for_device(dma_addr, paddr, size, dir);
 }
 
 /*
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 08adc590f631..597af455a522 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
         wait_queue_head_t read_waitq;
 
         struct kref kref;
+
+        struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
         mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
         struct xenbus_file_priv *u;
         struct xenbus_transaction_holder *trans, *tmp;
         struct watch_adapter *watch, *tmp_watch;
         struct read_buffer *rb, *tmp_rb;
 
-        u = container_of(kref, struct xenbus_file_priv, kref);
+        u = container_of(wq, struct xenbus_file_priv, wq);
 
         /*
          * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
         kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+        struct xenbus_file_priv *u;
+
+        /*
+         * We might be called in xenbus_thread().
+         * Use workqueue to avoid deadlock.
+         */
+        u = container_of(kref, struct xenbus_file_priv, kref);
+        schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
         struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
         INIT_LIST_HEAD(&u->watches);
         INIT_LIST_HEAD(&u->read_buffers);
         init_waitqueue_head(&u->read_waitq);
+        INIT_WORK(&u->wq, xenbus_worker);
 
         mutex_init(&u->reply_mutex);
         mutex_init(&u->msgbuffer_mutex);
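
Notes on the main changes above, with illustrative sketches in C. All demo_* names below are hypothetical editorial examples; only the kernel APIs they call are real.

gntdev: the per-file-descriptor mmu_notifier (which had to scan every map, plus the freeable_maps list, on each invalidation) is replaced by one mmu_interval_notifier per grant map, so the core mm only calls back for ranges that overlap that map. A minimal sketch of the interval-notifier pattern, assuming a per-mapping state struct:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

struct demo_map {
	struct mmu_interval_notifier notifier;
	/* per-mapping state torn down on invalidation */
};

static bool demo_invalidate(struct mmu_interval_notifier *mni,
			    const struct mmu_notifier_range *range,
			    unsigned long cur_seq)
{
	struct demo_map *map = container_of(mni, struct demo_map, notifier);

	/* A non-blockable invalidation cannot wait; ask the core to retry. */
	if (!mmu_notifier_range_blockable(range))
		return false;

	pr_debug("invalidate map %p range %lx-%lx\n",
		 map, range->start, range->end);
	/* tear down hypervisor/hardware references covering 'range' here */
	return true;
}

static const struct mmu_interval_notifier_ops demo_mmu_ops = {
	.invalidate = demo_invalidate,
};

static int demo_track(struct demo_map *map, struct mm_struct *mm,
		      unsigned long start, unsigned long length)
{
	/* One notifier per tracked range; pair with
	 * mmu_interval_notifier_remove(&map->notifier) on teardown,
	 * as gntdev_vma_close() does above. */
	return mmu_interval_notifier_insert(&map->notifier, mm, start,
					    length, &demo_mmu_ops);
}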
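
xenbus_dev_frontend: the kref release can fire from xenbus_thread() itself, where unregistering watches would self-deadlock, so xenbus_file_free() now only schedules a work item and xenbus_worker() does the real teardown. The general deferred-free pattern, sketched with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct kref kref;
	struct work_struct work;
	/* resources that need blocking teardown */
};

static void demo_worker(struct work_struct *work)
{
	struct demo_priv *p = container_of(work, struct demo_priv, work);

	/* process context: safe to take mutexes, unregister watches, etc. */
	kfree(p);
}

static void demo_release(struct kref *kref)
{
	struct demo_priv *p = container_of(kref, struct demo_priv, kref);

	schedule_work(&p->work);	/* defer; never tear down inline */
}

/* Setup: kref_init(&p->kref); INIT_WORK(&p->work, demo_worker);
 * Drop a reference with: kref_put(&p->kref, demo_release); */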
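
platform-pci: the legacy struct pci_driver .resume_early hook is replaced by generic dev_pm_ops; .resume_noirq runs at the equivalent early point of resume, before device interrupts are re-enabled, which is what re-arming the Xen callback via needs. The shape of the conversion (the diff uses a non-const ops struct; const shown here):

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_resume_noirq(struct device *dev)
{
	/* re-establish device state; IRQs are still disabled at this stage */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.resume_noirq = demo_resume_noirq,
};

static struct pci_driver demo_driver = {
	.name = "demo",
	/* .probe, .id_table, ... */
	.driver = {
		.pm = &demo_pm_ops,
	},
};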
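
pvcalls-back: rskq_accept_head is tested without holding the accept-queue lock, so the load is wrapped in READ_ONCE() to guarantee a single, non-torn access and to document that the read is intentionally lockless (pairing with the locked/WRITE_ONCE update on the enqueue side). Sketched:

#include <linux/compiler.h>
#include <net/request_sock.h>

static bool demo_have_pending(struct request_sock_queue *queue)
{
	/* Lockless peek: only answers "is there something queued right
	 * now"; taking the queue lock is still needed to dequeue. */
	return READ_ONCE(queue->rskq_accept_head) != NULL;
}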
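
balloon: the PG_offline marking is centralized so __SetPageOffline() and __ClearPageOffline() each happen in exactly one place (balloon_append() and balloon_retrieve()) instead of at every call site, which also lets __balloon_append() be dropped. The invariant, sketched with list handling elided:

#include <linux/mm.h>
#include <linux/page-flags.h>

static void demo_append(struct page *page)
{
	__SetPageOffline(page);		/* the one place pages go offline */
	/* ... add page to the ballooned list ... */
}

static struct page *demo_retrieve(void)
{
	struct page *page = NULL;

	/* ... take a page off the ballooned list, if any ... */
	if (page)
		__ClearPageOffline(page);	/* the one place pages return */
	return page;
}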