From 8c3799ee25e1fda159099af09f5f2e86091e41d4 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:43 +0300 Subject: xen/grant-table: Make set/clear page private code shared Make set/clear page private code shared and accessible to other kernel modules which can re-use these instead of open-coding. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/grant-table.c | 54 ++++++++++++++++++++++++++++++----------------- include/xen/grant_table.h | 3 +++ 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index ba9f3eec2bd0..bb4840653bf2 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -769,29 +769,18 @@ void gnttab_free_auto_xlat_frames(void) } EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames); -/** - * gnttab_alloc_pages - alloc pages suitable for grant mapping into - * @nr_pages: number of pages to alloc - * @pages: returns the pages - */ -int gnttab_alloc_pages(int nr_pages, struct page **pages) +int gnttab_pages_set_private(int nr_pages, struct page **pages) { int i; - int ret; - - ret = alloc_xenballooned_pages(nr_pages, pages); - if (ret < 0) - return ret; for (i = 0; i < nr_pages; i++) { #if BITS_PER_LONG < 64 struct xen_page_foreign *foreign; foreign = kzalloc(sizeof(*foreign), GFP_KERNEL); - if (!foreign) { - gnttab_free_pages(nr_pages, pages); + if (!foreign) return -ENOMEM; - } + set_page_private(pages[i], (unsigned long)foreign); #endif SetPagePrivate(pages[i]); @@ -799,14 +788,30 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) return 0; } -EXPORT_SYMBOL_GPL(gnttab_alloc_pages); +EXPORT_SYMBOL_GPL(gnttab_pages_set_private); /** - * gnttab_free_pages - free pages allocated by gnttab_alloc_pages() - * @nr_pages; number of pages to free - * @pages: the pages + * gnttab_alloc_pages - alloc pages suitable for grant mapping into + * @nr_pages: number of pages to alloc + * @pages: returns the pages */ -void gnttab_free_pages(int nr_pages, struct page **pages) +int gnttab_alloc_pages(int nr_pages, struct page **pages) +{ + int ret; + + ret = alloc_xenballooned_pages(nr_pages, pages); + if (ret < 0) + return ret; + + ret = gnttab_pages_set_private(nr_pages, pages); + if (ret < 0) + gnttab_free_pages(nr_pages, pages); + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_alloc_pages); + +void gnttab_pages_clear_private(int nr_pages, struct page **pages) { int i; @@ -818,6 +823,17 @@ void gnttab_free_pages(int nr_pages, struct page **pages) ClearPagePrivate(pages[i]); } } +} +EXPORT_SYMBOL_GPL(gnttab_pages_clear_private); + +/** + * gnttab_free_pages - free pages allocated by gnttab_alloc_pages() + * @nr_pages; number of pages to free + * @pages: the pages + */ +void gnttab_free_pages(int nr_pages, struct page **pages) +{ + gnttab_pages_clear_private(nr_pages, pages); free_xenballooned_pages(nr_pages, pages); } EXPORT_SYMBOL_GPL(gnttab_free_pages); diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 2e37741f6b8d..de03f2542bb7 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -198,6 +198,9 @@ void gnttab_free_auto_xlat_frames(void); int gnttab_alloc_pages(int nr_pages, struct page **pages); void gnttab_free_pages(int nr_pages, struct page **pages); +int gnttab_pages_set_private(int nr_pages, struct page **pages); +void gnttab_pages_clear_private(int nr_pages, struct page **pages); + int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref 
*kmap_ops, struct page **pages, unsigned int count); -- cgit v1.2.3-59-g8ed1b From ae4c51a50c990d6feba7058c181dc8f22ca5f1d8 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:44 +0300 Subject: xen/balloon: Share common memory reservation routines Memory {increase|decrease}_reservation and VA mappings update/reset code used in balloon driver can be made common, so other drivers can also re-use the same functionality without open-coding. Create a dedicated file for the shared code and export corresponding symbols for other kernel modules. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/Makefile | 1 + drivers/xen/balloon.c | 75 +++------------------------ drivers/xen/mem-reservation.c | 118 ++++++++++++++++++++++++++++++++++++++++++ include/xen/mem-reservation.h | 59 +++++++++++++++++++++ 4 files changed, 184 insertions(+), 69 deletions(-) create mode 100644 drivers/xen/mem-reservation.c create mode 100644 include/xen/mem-reservation.h diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 48b154276179..129dd1cc1b83 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_X86) += fallback.o obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o +obj-y += mem-reservation.o obj-y += events/ obj-y += xenbus/ diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 065f0b607373..e12bb256036f 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -71,6 +71,7 @@ #include #include #include +#include static int xen_hotplug_unpopulated; @@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); #define GFP_BALLOON \ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC) -static void scrub_page(struct page *page) -{ -#ifdef CONFIG_XEN_SCRUB_PAGES - clear_highpage(page); -#endif -} - /* balloon_append: add the given page to the balloon. */ static void __balloon_append(struct page *page) { @@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages) int rc; unsigned long i; struct page *page; - struct xen_memory_reservation reservation = { - .address_bits = 0, - .extent_order = EXTENT_ORDER, - .domid = DOMID_SELF - }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); @@ -479,16 +468,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages) break; } - /* XENMEM_populate_physmap requires a PFN based on Xen - * granularity. - */ frame_list[i] = page_to_xen_pfn(page); page = balloon_next_page(page); } - set_xen_guest_handle(reservation.extent_start, frame_list); - reservation.nr_extents = nr_pages; - rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); + rc = xenmem_reservation_increase(nr_pages, frame_list); if (rc <= 0) return BP_EAGAIN; @@ -496,29 +480,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages) page = balloon_retrieve(false); BUG_ON(page == NULL); -#ifdef CONFIG_XEN_HAVE_PVMMU - /* - * We don't support PV MMU when Linux and Xen is using - * different page granularity. - */ - BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - unsigned long pfn = page_to_pfn(page); - - set_phys_to_machine(pfn, frame_list[i]); - - /* Link back into the page tables if not highmem. 
*/ - if (!PageHighMem(page)) { - int ret; - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - mfn_pte(frame_list[i], PAGE_KERNEL), - 0); - BUG_ON(ret); - } - } -#endif + xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]); /* Relinquish the page back to the allocator. */ free_reserved_page(page); @@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) unsigned long i; struct page *page, *tmp; int ret; - struct xen_memory_reservation reservation = { - .address_bits = 0, - .extent_order = EXTENT_ORDER, - .domid = DOMID_SELF - }; LIST_HEAD(pages); if (nr_pages > ARRAY_SIZE(frame_list)) @@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) break; } adjust_managed_page_count(page, -1); - scrub_page(page); + xenmem_reservation_scrub_page(page); list_add(&page->lru, &pages); } @@ -572,28 +529,10 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) */ i = 0; list_for_each_entry_safe(page, tmp, &pages, lru) { - /* XENMEM_decrease_reservation requires a GFN */ frame_list[i++] = xen_page_to_gfn(page); -#ifdef CONFIG_XEN_HAVE_PVMMU - /* - * We don't support PV MMU when Linux and Xen is using - * different page granularity. - */ - BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - unsigned long pfn = page_to_pfn(page); + xenmem_reservation_va_mapping_reset(1, &page); - if (!PageHighMem(page)) { - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - __pte_ma(0), 0); - BUG_ON(ret); - } - __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); - } -#endif list_del(&page->lru); balloon_append(page); @@ -601,9 +540,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) flush_tlb_all(); - set_xen_guest_handle(reservation.extent_start, frame_list); - reservation.nr_extents = nr_pages; - ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); + ret = xenmem_reservation_decrease(nr_pages, frame_list); BUG_ON(ret != nr_pages); balloon_stats.current_pages -= nr_pages; diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c new file mode 100644 index 000000000000..084799c6180e --- /dev/null +++ b/drivers/xen/mem-reservation.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 + +/****************************************************************************** + * Xen memory reservation utilities. + * + * Copyright (c) 2003, B Dragovic + * Copyright (c) 2003-2004, M Williamson, K Fraser + * Copyright (c) 2005 Dan M. Smith, IBM Corporation + * Copyright (c) 2010 Daniel Kiper + * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. + */ + +#include + +#include +#include + +/* + * Use one extent per PAGE_SIZE to avoid to break down the page into + * multiple frame. + */ +#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1) + +#ifdef CONFIG_XEN_HAVE_PVMMU +void __xenmem_reservation_va_mapping_update(unsigned long count, + struct page **pages, + xen_pfn_t *frames) +{ + int i; + + for (i = 0; i < count; i++) { + struct page *page = pages[i]; + unsigned long pfn = page_to_pfn(page); + + BUG_ON(!page); + + /* + * We don't support PV MMU when Linux and Xen is using + * different page granularity. + */ + BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); + + set_phys_to_machine(pfn, frames[i]); + + /* Link back into the page tables if not highmem. 
*/ + if (!PageHighMem(page)) { + int ret; + + ret = HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + mfn_pte(frames[i], PAGE_KERNEL), + 0); + BUG_ON(ret); + } + } +} +EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update); + +void __xenmem_reservation_va_mapping_reset(unsigned long count, + struct page **pages) +{ + int i; + + for (i = 0; i < count; i++) { + struct page *page = pages[i]; + unsigned long pfn = page_to_pfn(page); + + /* + * We don't support PV MMU when Linux and Xen are using + * different page granularity. + */ + BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); + + if (!PageHighMem(page)) { + int ret; + + ret = HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + __pte_ma(0), 0); + BUG_ON(ret); + } + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } +} +EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset); +#endif /* CONFIG_XEN_HAVE_PVMMU */ + +/* @frames is an array of PFNs */ +int xenmem_reservation_increase(int count, xen_pfn_t *frames) +{ + struct xen_memory_reservation reservation = { + .address_bits = 0, + .extent_order = EXTENT_ORDER, + .domid = DOMID_SELF + }; + + /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */ + set_xen_guest_handle(reservation.extent_start, frames); + reservation.nr_extents = count; + return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); +} +EXPORT_SYMBOL_GPL(xenmem_reservation_increase); + +/* @frames is an array of GFNs */ +int xenmem_reservation_decrease(int count, xen_pfn_t *frames) +{ + struct xen_memory_reservation reservation = { + .address_bits = 0, + .extent_order = EXTENT_ORDER, + .domid = DOMID_SELF + }; + + /* XENMEM_decrease_reservation requires a GFN */ + set_xen_guest_handle(reservation.extent_start, frames); + reservation.nr_extents = count; + return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); +} +EXPORT_SYMBOL_GPL(xenmem_reservation_decrease); diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h new file mode 100644 index 000000000000..80b52b4945e9 --- /dev/null +++ b/include/xen/mem-reservation.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Xen memory reservation utilities. + * + * Copyright (c) 2003, B Dragovic + * Copyright (c) 2003-2004, M Williamson, K Fraser + * Copyright (c) 2005 Dan M. Smith, IBM Corporation + * Copyright (c) 2010 Daniel Kiper + * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. 
+ */ + +#ifndef _XENMEM_RESERVATION_H +#define _XENMEM_RESERVATION_H + +#include + +#include + +static inline void xenmem_reservation_scrub_page(struct page *page) +{ +#ifdef CONFIG_XEN_SCRUB_PAGES + clear_highpage(page); +#endif +} + +#ifdef CONFIG_XEN_HAVE_PVMMU +void __xenmem_reservation_va_mapping_update(unsigned long count, + struct page **pages, + xen_pfn_t *frames); + +void __xenmem_reservation_va_mapping_reset(unsigned long count, + struct page **pages); +#endif + +static inline void xenmem_reservation_va_mapping_update(unsigned long count, + struct page **pages, + xen_pfn_t *frames) +{ +#ifdef CONFIG_XEN_HAVE_PVMMU + if (!xen_feature(XENFEAT_auto_translated_physmap)) + __xenmem_reservation_va_mapping_update(count, pages, frames); +#endif +} + +static inline void xenmem_reservation_va_mapping_reset(unsigned long count, + struct page **pages) +{ +#ifdef CONFIG_XEN_HAVE_PVMMU + if (!xen_feature(XENFEAT_auto_translated_physmap)) + __xenmem_reservation_va_mapping_reset(count, pages); +#endif +} + +int xenmem_reservation_increase(int count, xen_pfn_t *frames); + +int xenmem_reservation_decrease(int count, xen_pfn_t *frames); + +#endif -- cgit v1.2.3-59-g8ed1b From 9bdc7304f536f3f77f0a69e7c3a8f5afda561a68 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:45 +0300 Subject: xen/grant-table: Allow allocating buffers suitable for DMA Extend grant table module API to allow allocating buffers that can be used for DMA operations and mapping foreign grant references on top of those. The resulting buffer is similar to the one allocated by the balloon driver in that proper memory reservation is made by ({increase|decrease}_reservation and VA mappings are updated if needed). This is useful for sharing foreign buffers with HW drivers which cannot work with scattered buffers provided by the balloon driver, but require DMAable memory instead. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/Kconfig | 14 +++++++ drivers/xen/grant-table.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++ include/xen/grant_table.h | 18 +++++++++ 3 files changed, 129 insertions(+) diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index e5d0c28372ea..75e5c40f80a5 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -161,6 +161,20 @@ config XEN_GRANT_DEV_ALLOC to other domains. This can be used to implement frontend drivers or as part of an inter-domain shared memory channel. +config XEN_GRANT_DMA_ALLOC + bool "Allow allocating DMA capable buffers with grant reference module" + depends on XEN && HAS_DMA + help + Extends grant table module API to allow allocating DMA capable + buffers and mapping foreign grant references on top of it. + The resulting buffer is similar to one allocated by the balloon + driver in that proper memory reservation is made by + ({increase|decrease}_reservation and VA mappings are updated if + needed). + This is useful for sharing foreign buffers with HW drivers which + cannot work with scattered buffers provided by the balloon driver, + but require DMAable memory instead. 
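As a hedged illustration only (not part of this series; the helper name and error handling are placeholders), a kernel module holding a struct device could drive the API this patch introduces, struct gnttab_dma_alloc_args together with gnttab_dma_alloc_pages()/gnttab_dma_free_pages() from include/xen/grant_table.h as added below, roughly like this:

/*
 * Illustrative sketch: allocate a DMA-capable, grant-mappable buffer of
 * nr_pages pages for dev. Requires CONFIG_XEN_GRANT_DMA_ALLOC.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <xen/grant_table.h>

static int example_alloc_dma_buffer(struct device *dev, int nr_pages,
				    struct gnttab_dma_alloc_args *args)
{
	int ret;

	memset(args, 0, sizeof(*args));
	args->dev = dev;
	args->coherent = true;	/* false selects a write-combine buffer */
	args->nr_pages = nr_pages;
	args->pages = kcalloc(nr_pages, sizeof(args->pages[0]), GFP_KERNEL);
	args->frames = kcalloc(nr_pages, sizeof(args->frames[0]), GFP_KERNEL);
	if (!args->pages || !args->frames) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = gnttab_dma_alloc_pages(args);
	if (ret < 0)
		goto fail;

	/* args->vaddr and args->dev_bus_addr now describe the buffer. */
	return 0;

fail:
	kfree(args->pages);
	kfree(args->frames);
	return ret;
}

The buffer would later be released with gnttab_dma_free_pages(args), followed by freeing the pages and frames arrays.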
+ config SWIOTLB_XEN def_bool y select SWIOTLB diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index bb4840653bf2..7bafa703a992 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -45,6 +45,9 @@ #include #include #include +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC +#include +#endif #include #include @@ -57,6 +60,7 @@ #ifdef CONFIG_X86 #include #endif +#include #include #include @@ -838,6 +842,99 @@ void gnttab_free_pages(int nr_pages, struct page **pages) } EXPORT_SYMBOL_GPL(gnttab_free_pages); +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC +/** + * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into + * @args: arguments to the function + */ +int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args) +{ + unsigned long pfn, start_pfn; + size_t size; + int i, ret; + + size = args->nr_pages << PAGE_SHIFT; + if (args->coherent) + args->vaddr = dma_alloc_coherent(args->dev, size, + &args->dev_bus_addr, + GFP_KERNEL | __GFP_NOWARN); + else + args->vaddr = dma_alloc_wc(args->dev, size, + &args->dev_bus_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!args->vaddr) { + pr_debug("Failed to allocate DMA buffer of size %zu\n", size); + return -ENOMEM; + } + + start_pfn = __phys_to_pfn(args->dev_bus_addr); + for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages; + pfn++, i++) { + struct page *page = pfn_to_page(pfn); + + args->pages[i] = page; + args->frames[i] = xen_page_to_gfn(page); + xenmem_reservation_scrub_page(page); + } + + xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages); + + ret = xenmem_reservation_decrease(args->nr_pages, args->frames); + if (ret != args->nr_pages) { + pr_debug("Failed to decrease reservation for DMA buffer\n"); + ret = -EFAULT; + goto fail; + } + + ret = gnttab_pages_set_private(args->nr_pages, args->pages); + if (ret < 0) + goto fail; + + return 0; + +fail: + gnttab_dma_free_pages(args); + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages); + +/** + * gnttab_dma_free_pages - free DMAable pages + * @args: arguments to the function + */ +int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args) +{ + size_t size; + int i, ret; + + gnttab_pages_clear_private(args->nr_pages, args->pages); + + for (i = 0; i < args->nr_pages; i++) + args->frames[i] = page_to_xen_pfn(args->pages[i]); + + ret = xenmem_reservation_increase(args->nr_pages, args->frames); + if (ret != args->nr_pages) { + pr_debug("Failed to decrease reservation for DMA buffer\n"); + ret = -EFAULT; + } else { + ret = 0; + } + + xenmem_reservation_va_mapping_update(args->nr_pages, args->pages, + args->frames); + + size = args->nr_pages << PAGE_SHIFT; + if (args->coherent) + dma_free_coherent(args->dev, size, + args->vaddr, args->dev_bus_addr); + else + dma_free_wc(args->dev, size, + args->vaddr, args->dev_bus_addr); + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_dma_free_pages); +#endif + /* Handling of paged out grant targets (GNTST_eagain) */ #define MAX_DELAY 256 static inline void diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index de03f2542bb7..9bc5bc07d4d3 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -198,6 +198,24 @@ void gnttab_free_auto_xlat_frames(void); int gnttab_alloc_pages(int nr_pages, struct page **pages); void gnttab_free_pages(int nr_pages, struct page **pages); +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC +struct gnttab_dma_alloc_args { + /* Device for which DMA memory will be/was allocated. */ + struct device *dev; + /* If set then DMA buffer is coherent and write-combine otherwise. 
*/ + bool coherent; + + int nr_pages; + struct page **pages; + xen_pfn_t *frames; + void *vaddr; + dma_addr_t dev_bus_addr; +}; + +int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args); +int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args); +#endif + int gnttab_pages_set_private(int nr_pages, struct page **pages); void gnttab_pages_clear_private(int nr_pages, struct page **pages); -- cgit v1.2.3-59-g8ed1b From 975ef7ff81bb000af6e6c8e63e81f89f3468dcf7 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:46 +0300 Subject: xen/gntdev: Allow mappings for DMA buffers Allow mappings for DMA backed buffers if grant table module supports such: this extends grant device to not only map buffers made of balloon pages, but also from buffers allocated with dma_alloc_xxx. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++- include/uapi/xen/gntdev.h | 15 +++++++ 2 files changed, 112 insertions(+), 2 deletions(-) diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index bd56653b9bbc..173332f439d8 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -37,6 +37,9 @@ #include #include #include +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC +#include +#endif #include #include @@ -72,6 +75,11 @@ struct gntdev_priv { struct mutex lock; struct mm_struct *mm; struct mmu_notifier mn; + +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + /* Device for which DMA memory is allocated. */ + struct device *dma_dev; +#endif }; struct unmap_notify { @@ -96,10 +104,27 @@ struct grant_map { struct gnttab_unmap_grant_ref *kunmap_ops; struct page **pages; unsigned long pages_vm_start; + +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + /* + * If dmabuf_vaddr is not NULL then this mapping is backed by DMA + * capable memory. + */ + + struct device *dma_dev; + /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */ + int dma_flags; + void *dma_vaddr; + dma_addr_t dma_bus_addr; + /* Needed to avoid allocation in gnttab_dma_free_pages(). 
*/ + xen_pfn_t *frames; +#endif }; static int unmap_grant_pages(struct grant_map *map, int offset, int pages); +static struct miscdevice gntdev_miscdev; + /* ------------------------------------------------------------------ */ static void gntdev_print_maps(struct gntdev_priv *priv, @@ -121,8 +146,27 @@ static void gntdev_free_map(struct grant_map *map) if (map == NULL) return; +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + if (map->dma_vaddr) { + struct gnttab_dma_alloc_args args; + + args.dev = map->dma_dev; + args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT); + args.nr_pages = map->count; + args.pages = map->pages; + args.frames = map->frames; + args.vaddr = map->dma_vaddr; + args.dev_bus_addr = map->dma_bus_addr; + + gnttab_dma_free_pages(&args); + } else +#endif if (map->pages) gnttab_free_pages(map->count, map->pages); + +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + kfree(map->frames); +#endif kfree(map->pages); kfree(map->grants); kfree(map->map_ops); @@ -132,7 +176,8 @@ static void gntdev_free_map(struct grant_map *map) kfree(map); } -static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) +static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, + int dma_flags) { struct grant_map *add; int i; @@ -155,6 +200,37 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) NULL == add->pages) goto err; +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + add->dma_flags = dma_flags; + + /* + * Check if this mapping is requested to be backed + * by a DMA buffer. + */ + if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) { + struct gnttab_dma_alloc_args args; + + add->frames = kcalloc(count, sizeof(add->frames[0]), + GFP_KERNEL); + if (!add->frames) + goto err; + + /* Remember the device, so we can free DMA memory. */ + add->dma_dev = priv->dma_dev; + + args.dev = priv->dma_dev; + args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT); + args.nr_pages = count; + args.pages = add->pages; + args.frames = add->frames; + + if (gnttab_dma_alloc_pages(&args)) + goto err; + + add->dma_vaddr = args.vaddr; + add->dma_bus_addr = args.dev_bus_addr; + } else +#endif if (gnttab_alloc_pages(count, add->pages)) goto err; @@ -325,6 +401,14 @@ static int map_grant_pages(struct grant_map *map) map->unmap_ops[i].handle = map->map_ops[i].handle; if (use_ptemod) map->kunmap_ops[i].handle = map->kmap_ops[i].handle; +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + else if (map->dma_vaddr) { + unsigned long bfn; + + bfn = pfn_to_bfn(page_to_pfn(map->pages[i])); + map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn); + } +#endif } return err; } @@ -548,6 +632,17 @@ static int gntdev_open(struct inode *inode, struct file *flip) } flip->private_data = priv; +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + priv->dma_dev = gntdev_miscdev.this_device; + + /* + * The device is not spawn from a device tree, so arch_setup_dma_ops + * is not called, thus leaving the device with dummy DMA ops. + * Fix this by calling of_dma_configure() with a NULL node to set + * default DMA ops. + */ + of_dma_configure(priv->dma_dev, NULL, true); +#endif pr_debug("priv %p\n", priv); return 0; @@ -589,7 +684,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, return -EINVAL; err = -ENOMEM; - map = gntdev_alloc_map(priv, op.count); + map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. 
*/); if (!map) return err; diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h index 6d1163456c03..4b9d498a31d4 100644 --- a/include/uapi/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h @@ -200,4 +200,19 @@ struct ioctl_gntdev_grant_copy { /* Send an interrupt on the indicated event channel */ #define UNMAP_NOTIFY_SEND_EVENT 0x2 +/* + * Flags to be used while requesting memory mapping's backing storage + * to be allocated with DMA API. + */ + +/* + * The buffer is backed with memory allocated with dma_alloc_wc. + */ +#define GNTDEV_DMA_FLAG_WC (1 << 0) + +/* + * The buffer is backed with memory allocated with dma_alloc_coherent. + */ +#define GNTDEV_DMA_FLAG_COHERENT (1 << 1) + #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ -- cgit v1.2.3-59-g8ed1b From 1d314567553883d9f606cc59e8e66f465a4b6ccd Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:47 +0300 Subject: xen/gntdev: Make private routines/structures accessible This is in preparation for adding support of DMA buffer functionality: make map/unmap related code and structures, used privately by gntdev, ready for dma-buf extension, which will re-use these. Rename corresponding structures as those become non-private to gntdev now. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev-common.h | 88 +++++++++++++++++++++++++++++ drivers/xen/gntdev.c | 134 ++++++++++++++------------------------------ 2 files changed, 131 insertions(+), 91 deletions(-) create mode 100644 drivers/xen/gntdev-common.h diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h new file mode 100644 index 000000000000..2346c198f72e --- /dev/null +++ b/drivers/xen/gntdev-common.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Common functionality of grant device. + * + * Copyright (c) 2006-2007, D G Murray. + * (c) 2009 Gerd Hoffmann + * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. + */ + +#ifndef _GNTDEV_COMMON_H +#define _GNTDEV_COMMON_H + +#include +#include +#include +#include + +struct gntdev_priv { + /* Maps with visible offsets in the file descriptor. */ + struct list_head maps; + /* + * Maps that are not visible; will be freed on munmap. + * Only populated if populate_freeable_maps == 1 + */ + struct list_head freeable_maps; + /* lock protects maps and freeable_maps. */ + struct mutex lock; + struct mm_struct *mm; + struct mmu_notifier mn; + +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + /* Device for which DMA memory is allocated. */ + struct device *dma_dev; +#endif +}; + +struct gntdev_unmap_notify { + int flags; + /* Address relative to the start of the gntdev_grant_map. */ + int addr; + int event; +}; + +struct gntdev_grant_map { + struct list_head next; + struct vm_area_struct *vma; + int index; + int count; + int flags; + refcount_t users; + struct gntdev_unmap_notify notify; + struct ioctl_gntdev_grant_ref *grants; + struct gnttab_map_grant_ref *map_ops; + struct gnttab_unmap_grant_ref *unmap_ops; + struct gnttab_map_grant_ref *kmap_ops; + struct gnttab_unmap_grant_ref *kunmap_ops; + struct page **pages; + unsigned long pages_vm_start; + +#ifdef CONFIG_XEN_GRANT_DMA_ALLOC + /* + * If dmabuf_vaddr is not NULL then this mapping is backed by DMA + * capable memory. + */ + + struct device *dma_dev; + /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */ + int dma_flags; + void *dma_vaddr; + dma_addr_t dma_bus_addr; + /* Needed to avoid allocation in gnttab_dma_free_pages(). 
*/ + xen_pfn_t *frames; +#endif +}; + +struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, + int dma_flags); + +void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add); + +void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map); + +bool gntdev_account_mapped_pages(int count); + +int gntdev_map_grant_pages(struct gntdev_grant_map *map); + +#endif diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 173332f439d8..e03f50052f3e 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -6,6 +6,7 @@ * * Copyright (c) 2006-2007, D G Murray. * (c) 2009 Gerd Hoffmann + * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -26,10 +27,6 @@ #include #include #include -#include -#include -#include -#include #include #include #include @@ -50,6 +47,8 @@ #include #include +#include "gntdev-common.h" + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Derek G. Murray , " "Gerd Hoffmann "); @@ -65,73 +64,23 @@ static atomic_t pages_mapped = ATOMIC_INIT(0); static int use_ptemod; #define populate_freeable_maps use_ptemod -struct gntdev_priv { - /* maps with visible offsets in the file descriptor */ - struct list_head maps; - /* maps that are not visible; will be freed on munmap. - * Only populated if populate_freeable_maps == 1 */ - struct list_head freeable_maps; - /* lock protects maps and freeable_maps */ - struct mutex lock; - struct mm_struct *mm; - struct mmu_notifier mn; - -#ifdef CONFIG_XEN_GRANT_DMA_ALLOC - /* Device for which DMA memory is allocated. */ - struct device *dma_dev; -#endif -}; - -struct unmap_notify { - int flags; - /* Address relative to the start of the grant_map */ - int addr; - int event; -}; - -struct grant_map { - struct list_head next; - struct vm_area_struct *vma; - int index; - int count; - int flags; - refcount_t users; - struct unmap_notify notify; - struct ioctl_gntdev_grant_ref *grants; - struct gnttab_map_grant_ref *map_ops; - struct gnttab_unmap_grant_ref *unmap_ops; - struct gnttab_map_grant_ref *kmap_ops; - struct gnttab_unmap_grant_ref *kunmap_ops; - struct page **pages; - unsigned long pages_vm_start; - -#ifdef CONFIG_XEN_GRANT_DMA_ALLOC - /* - * If dmabuf_vaddr is not NULL then this mapping is backed by DMA - * capable memory. - */ - - struct device *dma_dev; - /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */ - int dma_flags; - void *dma_vaddr; - dma_addr_t dma_bus_addr; - /* Needed to avoid allocation in gnttab_dma_free_pages(). 
*/ - xen_pfn_t *frames; -#endif -}; - -static int unmap_grant_pages(struct grant_map *map, int offset, int pages); +static int unmap_grant_pages(struct gntdev_grant_map *map, + int offset, int pages); static struct miscdevice gntdev_miscdev; /* ------------------------------------------------------------------ */ +bool gntdev_account_mapped_pages(int count) +{ + return atomic_add_return(count, &pages_mapped) > limit; +} + static void gntdev_print_maps(struct gntdev_priv *priv, char *text, int text_index) { #ifdef DEBUG - struct grant_map *map; + struct gntdev_grant_map *map; pr_debug("%s: maps list (priv %p)\n", __func__, priv); list_for_each_entry(map, &priv->maps, next) @@ -141,7 +90,7 @@ static void gntdev_print_maps(struct gntdev_priv *priv, #endif } -static void gntdev_free_map(struct grant_map *map) +static void gntdev_free_map(struct gntdev_grant_map *map) { if (map == NULL) return; @@ -176,13 +125,13 @@ static void gntdev_free_map(struct grant_map *map) kfree(map); } -static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, +struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, int dma_flags) { - struct grant_map *add; + struct gntdev_grant_map *add; int i; - add = kzalloc(sizeof(struct grant_map), GFP_KERNEL); + add = kzalloc(sizeof(*add), GFP_KERNEL); if (NULL == add) return NULL; @@ -252,9 +201,9 @@ err: return NULL; } -static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add) +void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add) { - struct grant_map *map; + struct gntdev_grant_map *map; list_for_each_entry(map, &priv->maps, next) { if (add->index + add->count < map->index) { @@ -269,10 +218,10 @@ done: gntdev_print_maps(priv, "[new]", add->index); } -static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, - int index, int count) +static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv, + int index, int count) { - struct grant_map *map; + struct gntdev_grant_map *map; list_for_each_entry(map, &priv->maps, next) { if (map->index != index) @@ -284,7 +233,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, return NULL; } -static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) +void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) { if (!map) return; @@ -315,7 +264,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) static int find_grant_ptes(pte_t *pte, pgtable_t token, unsigned long addr, void *data) { - struct grant_map *map = data; + struct gntdev_grant_map *map = data; unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; u64 pte_maddr; @@ -348,7 +297,7 @@ static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token, } #endif -static int map_grant_pages(struct grant_map *map) +int gntdev_map_grant_pages(struct gntdev_grant_map *map) { int i, err = 0; @@ -413,7 +362,8 @@ static int map_grant_pages(struct grant_map *map) return err; } -static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) +static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) { int i, err = 0; struct gntab_unmap_queue_data unmap_data; @@ -448,7 +398,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) return err; } -static int unmap_grant_pages(struct grant_map *map, int offset, int pages) +static int unmap_grant_pages(struct 
gntdev_grant_map *map, int offset, + int pages) { int range, err = 0; @@ -480,7 +431,7 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) static void gntdev_vma_open(struct vm_area_struct *vma) { - struct grant_map *map = vma->vm_private_data; + struct gntdev_grant_map *map = vma->vm_private_data; pr_debug("gntdev_vma_open %p\n", vma); refcount_inc(&map->users); @@ -488,7 +439,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma) static void gntdev_vma_close(struct vm_area_struct *vma) { - struct grant_map *map = vma->vm_private_data; + struct gntdev_grant_map *map = vma->vm_private_data; struct file *file = vma->vm_file; struct gntdev_priv *priv = file->private_data; @@ -512,7 +463,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma) static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma, unsigned long addr) { - struct grant_map *map = vma->vm_private_data; + struct gntdev_grant_map *map = vma->vm_private_data; return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT]; } @@ -525,7 +476,7 @@ static const struct vm_operations_struct gntdev_vmops = { /* ------------------------------------------------------------------ */ -static void unmap_if_in_range(struct grant_map *map, +static void unmap_if_in_range(struct gntdev_grant_map *map, unsigned long start, unsigned long end) { unsigned long mstart, mend; @@ -554,7 +505,7 @@ static void mn_invl_range_start(struct mmu_notifier *mn, unsigned long start, unsigned long end) { struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); - struct grant_map *map; + struct gntdev_grant_map *map; mutex_lock(&priv->lock); list_for_each_entry(map, &priv->maps, next) { @@ -570,7 +521,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); - struct grant_map *map; + struct gntdev_grant_map *map; int err; mutex_lock(&priv->lock); @@ -651,13 +602,14 @@ static int gntdev_open(struct inode *inode, struct file *flip) static int gntdev_release(struct inode *inode, struct file *flip) { struct gntdev_priv *priv = flip->private_data; - struct grant_map *map; + struct gntdev_grant_map *map; pr_debug("priv %p\n", priv); mutex_lock(&priv->lock); while (!list_empty(&priv->maps)) { - map = list_entry(priv->maps.next, struct grant_map, next); + map = list_entry(priv->maps.next, + struct gntdev_grant_map, next); list_del(&map->next); gntdev_put_map(NULL /* already removed */, map); } @@ -674,7 +626,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, struct ioctl_gntdev_map_grant_ref __user *u) { struct ioctl_gntdev_map_grant_ref op; - struct grant_map *map; + struct gntdev_grant_map *map; int err; if (copy_from_user(&op, u, sizeof(op)) != 0) @@ -688,7 +640,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, if (!map) return err; - if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { + if (unlikely(gntdev_account_mapped_pages(op.count))) { pr_debug("can't map: over limit\n"); gntdev_put_map(NULL, map); return err; @@ -715,7 +667,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, struct ioctl_gntdev_unmap_grant_ref __user *u) { struct ioctl_gntdev_unmap_grant_ref op; - struct grant_map *map; + struct gntdev_grant_map *map; int err = -ENOENT; if (copy_from_user(&op, u, sizeof(op)) != 0) @@ -741,7 +693,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, { struct ioctl_gntdev_get_offset_for_vaddr op; struct 
vm_area_struct *vma; - struct grant_map *map; + struct gntdev_grant_map *map; int rv = -EINVAL; if (copy_from_user(&op, u, sizeof(op)) != 0) @@ -772,7 +724,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) { struct ioctl_gntdev_unmap_notify op; - struct grant_map *map; + struct gntdev_grant_map *map; int rc; int out_flags; unsigned int out_event; @@ -1070,7 +1022,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) struct gntdev_priv *priv = flip->private_data; int index = vma->vm_pgoff; int count = vma_pages(vma); - struct grant_map *map; + struct gntdev_grant_map *map; int i, err = -EINVAL; if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) @@ -1127,7 +1079,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) } } - err = map_grant_pages(map); + err = gntdev_map_grant_pages(map); if (err) goto out_put_map; -- cgit v1.2.3-59-g8ed1b From 932d6562179efe8e2460a0343dbe0fcacf288a9e Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:48 +0300 Subject: xen/gntdev: Add initial support for dma-buf UAPI Add UAPI and IOCTLs for dma-buf grant device driver extension: the extension allows userspace processes and kernel modules to use Xen backed dma-buf implementation. With this extension grant references to the pages of an imported dma-buf can be exported for other domain use and grant references coming from a foreign domain can be converted into a local dma-buf for local export. Implement basic initialization and stubs for Xen DMA buffers' support. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/Kconfig | 10 +++ drivers/xen/Makefile | 1 + drivers/xen/gntdev-common.h | 6 ++ drivers/xen/gntdev-dmabuf.c | 177 ++++++++++++++++++++++++++++++++++++++++++++ drivers/xen/gntdev-dmabuf.h | 33 +++++++++ drivers/xen/gntdev.c | 31 ++++++++ include/uapi/xen/gntdev.h | 91 +++++++++++++++++++++++ 7 files changed, 349 insertions(+) create mode 100644 drivers/xen/gntdev-dmabuf.c create mode 100644 drivers/xen/gntdev-dmabuf.h diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 75e5c40f80a5..b459edfacff3 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -152,6 +152,16 @@ config XEN_GNTDEV help Allows userspace processes to use grants. +config XEN_GNTDEV_DMABUF + bool "Add support for dma-buf grant access device driver extension" + depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER + help + Allows userspace processes and kernel modules to use Xen backed + dma-buf implementation. With this extension grant references to + the pages of an imported dma-buf can be exported for other domain + use and grant references coming from a foreign domain can be + converted into a local dma-buf for local export. 
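As a hedged illustration of that userspace side (not part of the patches; the helper name, device handling and error handling are placeholders), a process could export a dma-buf backed by foreign grant references roughly as follows, using /dev/xen/gntdev and the IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS interface declared later in this series in include/uapi/xen/gntdev.h:

/* Illustrative sketch: export 'count' grant refs from 'domid' as a dma-buf. */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntdev.h>

static int example_export_dmabuf(uint32_t domid, const uint32_t *refs,
				 uint32_t count)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
	size_t sz = sizeof(*op) + (count - 1) * sizeof(op->refs[0]);
	int gntdev, fd = -1;

	gntdev = open("/dev/xen/gntdev", O_RDWR);
	if (gntdev < 0)
		return -1;

	op = calloc(1, sz);
	if (!op)
		goto out;

	op->flags = GNTDEV_DMA_FLAG_WC;	/* back the buffer with dma_alloc_wc */
	op->count = count;
	op->domid = domid;
	memcpy(op->refs, refs, count * sizeof(op->refs[0]));

	if (ioctl(gntdev, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
		fd = (int)op->fd;	/* dma-buf file descriptor on success */

	free(op);
out:
	close(gntdev);
	return fd;
}

The returned descriptor can then be handed to any dma-buf aware consumer; IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, also added later in the series, blocks until that consumer releases the buffer.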
+ config XEN_GRANT_DEV_ALLOC tristate "User-space grant reference allocator driver" depends on XEN diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 129dd1cc1b83..3e542f60f29f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -41,5 +41,6 @@ obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o xen-evtchn-y := evtchn.o xen-gntdev-y := gntdev.o +xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h index 2346c198f72e..2f8b949c3eeb 100644 --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -16,6 +16,8 @@ #include #include +struct gntdev_dmabuf_priv; + struct gntdev_priv { /* Maps with visible offsets in the file descriptor. */ struct list_head maps; @@ -33,6 +35,10 @@ struct gntdev_priv { /* Device for which DMA memory is allocated. */ struct device *dma_dev; #endif + +#ifdef CONFIG_XEN_GNTDEV_DMABUF + struct gntdev_dmabuf_priv *dmabuf_priv; +#endif }; struct gntdev_unmap_notify { diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c new file mode 100644 index 000000000000..af782c0a8a19 --- /dev/null +++ b/drivers/xen/gntdev-dmabuf.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Xen dma-buf functionality for gntdev. + * + * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "gntdev-common.h" +#include "gntdev-dmabuf.h" + +struct gntdev_dmabuf_priv { + /* List of exported DMA buffers. */ + struct list_head exp_list; + /* List of wait objects. */ + struct list_head exp_wait_list; + /* This is the lock which protects dma_buf_xxx lists. */ + struct mutex lock; +}; + +/* DMA buffer export support. */ + +/* Implementation of wait for exported DMA buffer to be released. */ + +static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd, + int wait_to_ms) +{ + return -EINVAL; +} + +static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags, + int count, u32 domid, u32 *refs, u32 *fd) +{ + *fd = -1; + return -EINVAL; +} + +/* DMA buffer import support. */ + +static struct gntdev_dmabuf * +dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, + int fd, int count, int domid) +{ + return ERR_PTR(-ENOMEM); +} + +static u32 *dmabuf_imp_get_refs(struct gntdev_dmabuf *gntdev_dmabuf) +{ + return NULL; +} + +static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd) +{ + return -EINVAL; +} + +/* DMA buffer IOCTL support. 
*/ + +long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod, + struct ioctl_gntdev_dmabuf_exp_from_refs __user *u) +{ + struct ioctl_gntdev_dmabuf_exp_from_refs op; + u32 *refs; + long ret; + + if (use_ptemod) { + pr_debug("Cannot provide dma-buf: use_ptemode %d\n", + use_ptemod); + return -EINVAL; + } + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + + if (unlikely(op.count <= 0)) + return -EINVAL; + + refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL); + if (!refs) + return -ENOMEM; + + if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) { + ret = -EFAULT; + goto out; + } + + ret = dmabuf_exp_from_refs(priv, op.flags, op.count, + op.domid, refs, &op.fd); + if (ret) + goto out; + + if (copy_to_user(u, &op, sizeof(op)) != 0) + ret = -EFAULT; + +out: + kfree(refs); + return ret; +} + +long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv, + struct ioctl_gntdev_dmabuf_exp_wait_released __user *u) +{ + struct ioctl_gntdev_dmabuf_exp_wait_released op; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + + return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd, + op.wait_to_ms); +} + +long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv, + struct ioctl_gntdev_dmabuf_imp_to_refs __user *u) +{ + struct ioctl_gntdev_dmabuf_imp_to_refs op; + struct gntdev_dmabuf *gntdev_dmabuf; + long ret; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + + if (unlikely(op.count <= 0)) + return -EINVAL; + + gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv, + priv->dma_dev, op.fd, + op.count, op.domid); + if (IS_ERR(gntdev_dmabuf)) + return PTR_ERR(gntdev_dmabuf); + + if (copy_to_user(u->refs, dmabuf_imp_get_refs(gntdev_dmabuf), + sizeof(*u->refs) * op.count) != 0) { + ret = -EFAULT; + goto out_release; + } + return 0; + +out_release: + dmabuf_imp_release(priv->dmabuf_priv, op.fd); + return ret; +} + +long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv, + struct ioctl_gntdev_dmabuf_imp_release __user *u) +{ + struct ioctl_gntdev_dmabuf_imp_release op; + + if (copy_from_user(&op, u, sizeof(op)) != 0) + return -EFAULT; + + return dmabuf_imp_release(priv->dmabuf_priv, op.fd); +} + +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void) +{ + struct gntdev_dmabuf_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + return priv; +} + +void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv) +{ + kfree(priv); +} diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h new file mode 100644 index 000000000000..7220a53d0fc5 --- /dev/null +++ b/drivers/xen/gntdev-dmabuf.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Xen dma-buf functionality for gntdev. + * + * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. 
+ */ + +#ifndef _GNTDEV_DMABUF_H +#define _GNTDEV_DMABUF_H + +#include + +struct gntdev_dmabuf_priv; +struct gntdev_priv; + +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void); + +void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv); + +long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod, + struct ioctl_gntdev_dmabuf_exp_from_refs __user *u); + +long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv, + struct ioctl_gntdev_dmabuf_exp_wait_released __user *u); + +long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv, + struct ioctl_gntdev_dmabuf_imp_to_refs __user *u); + +long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv, + struct ioctl_gntdev_dmabuf_imp_release __user *u); + +#endif diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index e03f50052f3e..c866a62f766d 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -48,6 +48,9 @@ #include #include "gntdev-common.h" +#ifdef CONFIG_XEN_GNTDEV_DMABUF +#include "gntdev-dmabuf.h" +#endif MODULE_LICENSE("GPL"); MODULE_AUTHOR("Derek G. Murray , " @@ -566,6 +569,15 @@ static int gntdev_open(struct inode *inode, struct file *flip) INIT_LIST_HEAD(&priv->freeable_maps); mutex_init(&priv->lock); +#ifdef CONFIG_XEN_GNTDEV_DMABUF + priv->dmabuf_priv = gntdev_dmabuf_init(); + if (IS_ERR(priv->dmabuf_priv)) { + ret = PTR_ERR(priv->dmabuf_priv); + kfree(priv); + return ret; + } +#endif + if (use_ptemod) { priv->mm = get_task_mm(current); if (!priv->mm) { @@ -616,8 +628,13 @@ static int gntdev_release(struct inode *inode, struct file *flip) WARN_ON(!list_empty(&priv->freeable_maps)); mutex_unlock(&priv->lock); +#ifdef CONFIG_XEN_GNTDEV_DMABUF + gntdev_dmabuf_fini(priv->dmabuf_priv); +#endif + if (use_ptemod) mmu_notifier_unregister(&priv->mn, priv->mm); + kfree(priv); return 0; } @@ -1009,6 +1026,20 @@ static long gntdev_ioctl(struct file *flip, case IOCTL_GNTDEV_GRANT_COPY: return gntdev_ioctl_grant_copy(priv, ptr); +#ifdef CONFIG_XEN_GNTDEV_DMABUF + case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS: + return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr); + + case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED: + return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr); + + case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS: + return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr); + + case IOCTL_GNTDEV_DMABUF_IMP_RELEASE: + return gntdev_ioctl_dmabuf_imp_release(priv, ptr); +#endif + default: pr_debug("priv %p, unknown cmd %x\n", priv, cmd); return -ENOIOCTLCMD; diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h index 4b9d498a31d4..fe4423e518c6 100644 --- a/include/uapi/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h @@ -5,6 +5,7 @@ * Interface to /dev/xen/gntdev. * * Copyright (c) 2007, D G Murray + * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 @@ -215,4 +216,94 @@ struct ioctl_gntdev_grant_copy { */ #define GNTDEV_DMA_FLAG_COHERENT (1 << 1) +/* + * Create a dma-buf [1] from grant references @refs of count @count provided + * by the foreign domain @domid with flags @flags. + * + * By default dma-buf is backed by system memory pages, but by providing + * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as + * a DMA write-combine or coherent buffer, e.g. allocated with dma_alloc_wc/ + * dma_alloc_coherent. 
+ * + * Returns 0 if dma-buf was successfully created and the corresponding + * dma-buf's file descriptor is returned in @fd. + * + * [1] Documentation/driver-api/dma-buf.rst + */ + +#define IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS \ + _IOC(_IOC_NONE, 'G', 9, \ + sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs)) +struct ioctl_gntdev_dmabuf_exp_from_refs { + /* IN parameters. */ + /* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */ + __u32 flags; + /* Number of grant references in @refs array. */ + __u32 count; + /* OUT parameters. */ + /* File descriptor of the dma-buf. */ + __u32 fd; + /* The domain ID of the grant references to be mapped. */ + __u32 domid; + /* Variable IN parameter. */ + /* Array of grant references of size @count. */ + __u32 refs[1]; +}; + +/* + * This will block until the dma-buf with the file descriptor @fd is + * released. This is only valid for buffers created with + * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS. + * + * If within @wait_to_ms milliseconds the buffer is not released + * then -ETIMEDOUT error is returned. + * If the buffer with the file descriptor @fd does not exist or has already + * been released, then -ENOENT is returned. For valid file descriptors + * this must not be treated as error. + */ +#define IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED \ + _IOC(_IOC_NONE, 'G', 10, \ + sizeof(struct ioctl_gntdev_dmabuf_exp_wait_released)) +struct ioctl_gntdev_dmabuf_exp_wait_released { + /* IN parameters */ + __u32 fd; + __u32 wait_to_ms; +}; + +/* + * Import a dma-buf with file descriptor @fd and export granted references + * to the pages of that dma-buf into array @refs of size @count. + */ +#define IOCTL_GNTDEV_DMABUF_IMP_TO_REFS \ + _IOC(_IOC_NONE, 'G', 11, \ + sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs)) +struct ioctl_gntdev_dmabuf_imp_to_refs { + /* IN parameters. */ + /* File descriptor of the dma-buf. */ + __u32 fd; + /* Number of grant references in @refs array. */ + __u32 count; + /* The domain ID for which references to be granted. */ + __u32 domid; + /* Reserved - must be zero. */ + __u32 reserved; + /* OUT parameters. */ + /* Array of grant references of size @count. */ + __u32 refs[1]; +}; + +/* + * This will close all references to the imported buffer with file descriptor + * @fd, so it can be released by the owner. This is only valid for buffers + * created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS. + */ +#define IOCTL_GNTDEV_DMABUF_IMP_RELEASE \ + _IOC(_IOC_NONE, 'G', 12, \ + sizeof(struct ioctl_gntdev_dmabuf_imp_release)) +struct ioctl_gntdev_dmabuf_imp_release { + /* IN parameters */ + __u32 fd; + __u32 reserved; +}; + #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ -- cgit v1.2.3-59-g8ed1b From a240d6e42e28c34fdc34b3a98ca838a31c939901 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:49 +0300 Subject: xen/gntdev: Implement dma-buf export functionality 1. Create a dma-buf from grant references provided by the foreign domain. By default dma-buf is backed by system memory pages, but by providing GNTDEV_DMA_FLAG_XXX flags it can also be created as a DMA write-combine/coherent buffer, e.g. allocated with corresponding dma_alloc_xxx API. Export the resulting buffer as a new dma-buf. 2. Implement waiting for the dma-buf to be released: block until the dma-buf with the file descriptor provided is released. If within the time-out provided the buffer is not released then -ETIMEDOUT error is returned. If the buffer with the file descriptor does not exist or has already been released, then -ENOENT is returned. 
For valid file descriptors this must not be treated as error. 3. Make gntdev's common code and structures available to dma-buf. [boris: added 'args.fd = -1' to dmabuf_exp_from_refs() to avoid an unnecessary warning about it not being initialized on i386 with gcc 8.1.1] Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev-dmabuf.c | 456 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 453 insertions(+), 3 deletions(-) diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index af782c0a8a19..4ca361fdf355 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -3,11 +3,14 @@ /* * Xen dma-buf functionality for gntdev. * + * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c. + * * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. */ #include #include +#include #include #include #include @@ -18,6 +21,39 @@ #include "gntdev-common.h" #include "gntdev-dmabuf.h" +struct gntdev_dmabuf { + struct gntdev_dmabuf_priv *priv; + struct dma_buf *dmabuf; + struct list_head next; + int fd; + + union { + struct { + /* Exported buffers are reference counted. */ + struct kref refcount; + + struct gntdev_priv *priv; + struct gntdev_grant_map *map; + } exp; + } u; + + /* Number of pages this buffer has. */ + int nr_pages; + /* Pages of this buffer. */ + struct page **pages; +}; + +struct gntdev_dmabuf_wait_obj { + struct list_head next; + struct gntdev_dmabuf *gntdev_dmabuf; + struct completion completion; +}; + +struct gntdev_dmabuf_attachment { + struct sg_table *sgt; + enum dma_data_direction dir; +}; + struct gntdev_dmabuf_priv { /* List of exported DMA buffers. */ struct list_head exp_list; @@ -31,17 +67,427 @@ struct gntdev_dmabuf_priv { /* Implementation of wait for exported DMA buffer to be released. */ +static void dmabuf_exp_release(struct kref *kref); + +static struct gntdev_dmabuf_wait_obj * +dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv, + struct gntdev_dmabuf *gntdev_dmabuf) +{ + struct gntdev_dmabuf_wait_obj *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + init_completion(&obj->completion); + obj->gntdev_dmabuf = gntdev_dmabuf; + + mutex_lock(&priv->lock); + list_add(&obj->next, &priv->exp_wait_list); + /* Put our reference and wait for gntdev_dmabuf's release to fire. 
*/ + kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release); + mutex_unlock(&priv->lock); + return obj; +} + +static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv, + struct gntdev_dmabuf_wait_obj *obj) +{ + mutex_lock(&priv->lock); + list_del(&obj->next); + mutex_unlock(&priv->lock); + kfree(obj); +} + +static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj, + u32 wait_to_ms) +{ + if (wait_for_completion_timeout(&obj->completion, + msecs_to_jiffies(wait_to_ms)) <= 0) + return -ETIMEDOUT; + + return 0; +} + +static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv, + struct gntdev_dmabuf *gntdev_dmabuf) +{ + struct gntdev_dmabuf_wait_obj *obj; + + list_for_each_entry(obj, &priv->exp_wait_list, next) + if (obj->gntdev_dmabuf == gntdev_dmabuf) { + pr_debug("Found gntdev_dmabuf in the wait list, wake\n"); + complete_all(&obj->completion); + break; + } +} + +static struct gntdev_dmabuf * +dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd) +{ + struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT); + + mutex_lock(&priv->lock); + list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next) + if (gntdev_dmabuf->fd == fd) { + pr_debug("Found gntdev_dmabuf in the wait list\n"); + kref_get(&gntdev_dmabuf->u.exp.refcount); + ret = gntdev_dmabuf; + break; + } + mutex_unlock(&priv->lock); + return ret; +} + static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd, int wait_to_ms) { - return -EINVAL; + struct gntdev_dmabuf *gntdev_dmabuf; + struct gntdev_dmabuf_wait_obj *obj; + int ret; + + pr_debug("Will wait for dma-buf with fd %d\n", fd); + /* + * Try to find the DMA buffer: if not found means that + * either the buffer has already been released or file descriptor + * provided is wrong. + */ + gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd); + if (IS_ERR(gntdev_dmabuf)) + return PTR_ERR(gntdev_dmabuf); + + /* + * gntdev_dmabuf still exists and is reference count locked by us now, + * so prepare to wait: allocate wait object and add it to the wait list, + * so we can find it on release. + */ + obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms); + dmabuf_exp_wait_obj_free(priv, obj); + return ret; +} + +/* DMA buffer export support. 
*/ + +static struct sg_table * +dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages) +{ + struct sg_table *sgt; + int ret; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto out; + } + + ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, + GFP_KERNEL); + if (ret) + goto out; + + return sgt; + +out: + kfree(sgt); + return ERR_PTR(ret); +} + +static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf, + struct device *target_dev, + struct dma_buf_attachment *attach) +{ + struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach; + + gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach), + GFP_KERNEL); + if (!gntdev_dmabuf_attach) + return -ENOMEM; + + gntdev_dmabuf_attach->dir = DMA_NONE; + attach->priv = gntdev_dmabuf_attach; + return 0; +} + +static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ + struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv; + + if (gntdev_dmabuf_attach) { + struct sg_table *sgt = gntdev_dmabuf_attach->sgt; + + if (sgt) { + if (gntdev_dmabuf_attach->dir != DMA_NONE) + dma_unmap_sg_attrs(attach->dev, sgt->sgl, + sgt->nents, + gntdev_dmabuf_attach->dir, + DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + } + + kfree(sgt); + kfree(gntdev_dmabuf_attach); + attach->priv = NULL; + } +} + +static struct sg_table * +dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv; + struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv; + struct sg_table *sgt; + + pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages, + attach->dev); + + if (dir == DMA_NONE || !gntdev_dmabuf_attach) + return ERR_PTR(-EINVAL); + + /* Return the cached mapping when possible. */ + if (gntdev_dmabuf_attach->dir == dir) + return gntdev_dmabuf_attach->sgt; + + /* + * Two mappings with different directions for the same attachment are + * not allowed. + */ + if (gntdev_dmabuf_attach->dir != DMA_NONE) + return ERR_PTR(-EBUSY); + + sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages, + gntdev_dmabuf->nr_pages); + if (!IS_ERR(sgt)) { + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC)) { + sg_free_table(sgt); + kfree(sgt); + sgt = ERR_PTR(-ENOMEM); + } else { + gntdev_dmabuf_attach->sgt = sgt; + gntdev_dmabuf_attach->dir = dir; + } + } + if (IS_ERR(sgt)) + pr_debug("Failed to map sg table for dev %p\n", attach->dev); + return sgt; +} + +static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). 
*/ +} + +static void dmabuf_exp_release(struct kref *kref) +{ + struct gntdev_dmabuf *gntdev_dmabuf = + container_of(kref, struct gntdev_dmabuf, u.exp.refcount); + + dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf); + list_del(&gntdev_dmabuf->next); + kfree(gntdev_dmabuf); +} + +static void dmabuf_exp_remove_map(struct gntdev_priv *priv, + struct gntdev_grant_map *map) +{ + mutex_lock(&priv->lock); + list_del(&map->next); + gntdev_put_map(NULL /* already removed */, map); + mutex_unlock(&priv->lock); +} + +static void dmabuf_exp_ops_release(struct dma_buf *dma_buf) +{ + struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv; + struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv; + + dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv, + gntdev_dmabuf->u.exp.map); + mutex_lock(&priv->lock); + kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release); + mutex_unlock(&priv->lock); +} + +static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf, + unsigned long page_num) +{ + /* Not implemented. */ + return NULL; +} + +static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf, + unsigned long page_num, void *addr) +{ + /* Not implemented. */ +} + +static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf, + struct vm_area_struct *vma) +{ + /* Not implemented. */ + return 0; +} + +static const struct dma_buf_ops dmabuf_exp_ops = { + .attach = dmabuf_exp_ops_attach, + .detach = dmabuf_exp_ops_detach, + .map_dma_buf = dmabuf_exp_ops_map_dma_buf, + .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf, + .release = dmabuf_exp_ops_release, + .map = dmabuf_exp_ops_kmap, + .unmap = dmabuf_exp_ops_kunmap, + .mmap = dmabuf_exp_ops_mmap, +}; + +struct gntdev_dmabuf_export_args { + struct gntdev_priv *priv; + struct gntdev_grant_map *map; + struct gntdev_dmabuf_priv *dmabuf_priv; + struct device *dev; + int count; + struct page **pages; + u32 fd; +}; + +static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct gntdev_dmabuf *gntdev_dmabuf; + int ret; + + gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL); + if (!gntdev_dmabuf) + return -ENOMEM; + + kref_init(&gntdev_dmabuf->u.exp.refcount); + + gntdev_dmabuf->priv = args->dmabuf_priv; + gntdev_dmabuf->nr_pages = args->count; + gntdev_dmabuf->pages = args->pages; + gntdev_dmabuf->u.exp.priv = args->priv; + gntdev_dmabuf->u.exp.map = args->map; + + exp_info.exp_name = KBUILD_MODNAME; + if (args->dev->driver && args->dev->driver->owner) + exp_info.owner = args->dev->driver->owner; + else + exp_info.owner = THIS_MODULE; + exp_info.ops = &dmabuf_exp_ops; + exp_info.size = args->count << PAGE_SHIFT; + exp_info.flags = O_RDWR; + exp_info.priv = gntdev_dmabuf; + + gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(gntdev_dmabuf->dmabuf)) { + ret = PTR_ERR(gntdev_dmabuf->dmabuf); + gntdev_dmabuf->dmabuf = NULL; + goto fail; + } + + ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC); + if (ret < 0) + goto fail; + + gntdev_dmabuf->fd = ret; + args->fd = ret; + + pr_debug("Exporting DMA buffer with fd %d\n", ret); + + mutex_lock(&args->dmabuf_priv->lock); + list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list); + mutex_unlock(&args->dmabuf_priv->lock); + return 0; + +fail: + if (gntdev_dmabuf->dmabuf) + dma_buf_put(gntdev_dmabuf->dmabuf); + kfree(gntdev_dmabuf); + return ret; +} + +static struct gntdev_grant_map * +dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags, + int count) +{ + struct gntdev_grant_map *map; + + if (unlikely(count <= 0)) + 
return ERR_PTR(-EINVAL); + + if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) && + (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) { + pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags); + return ERR_PTR(-EINVAL); + } + + map = gntdev_alloc_map(priv, count, dmabuf_flags); + if (!map) + return ERR_PTR(-ENOMEM); + + if (unlikely(gntdev_account_mapped_pages(count))) { + pr_debug("can't map %d pages: over limit\n", count); + gntdev_put_map(NULL, map); + return ERR_PTR(-ENOMEM); + } + return map; } static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags, int count, u32 domid, u32 *refs, u32 *fd) { - *fd = -1; - return -EINVAL; + struct gntdev_grant_map *map; + struct gntdev_dmabuf_export_args args; + int i, ret; + + map = dmabuf_exp_alloc_backing_storage(priv, flags, count); + if (IS_ERR(map)) + return PTR_ERR(map); + + for (i = 0; i < count; i++) { + map->grants[i].domid = domid; + map->grants[i].ref = refs[i]; + } + + mutex_lock(&priv->lock); + gntdev_add_map(priv, map); + mutex_unlock(&priv->lock); + + map->flags |= GNTMAP_host_map; +#if defined(CONFIG_X86) + map->flags |= GNTMAP_device_map; +#endif + + ret = gntdev_map_grant_pages(map); + if (ret < 0) + goto out; + + args.priv = priv; + args.map = map; + args.dev = priv->dma_dev; + args.dmabuf_priv = priv->dmabuf_priv; + args.count = map->count; + args.pages = map->pages; + args.fd = -1; /* Shut up unnecessary gcc warning for i386 */ + + ret = dmabuf_exp_from_pages(&args); + if (ret < 0) + goto out; + + *fd = args.fd; + return 0; + +out: + dmabuf_exp_remove_map(priv, map); + return ret; } /* DMA buffer import support. */ @@ -168,6 +614,10 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void) if (!priv) return ERR_PTR(-ENOMEM); + mutex_init(&priv->lock); + INIT_LIST_HEAD(&priv->exp_list); + INIT_LIST_HEAD(&priv->exp_wait_list); + return priv; } -- cgit v1.2.3-59-g8ed1b From bf8dc55b135873d8bc58bb8fbc91c52f3a902eea Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Fri, 20 Jul 2018 12:01:50 +0300 Subject: xen/gntdev: Implement dma-buf import functionality 1. Import a dma-buf with the file descriptor provided and export granted references to the pages of that dma-buf into the array of grant references. 2. Add API to close all references to an imported buffer, so it can be released by the owner. This is only valid for buffers created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev-dmabuf.c | 239 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 234 insertions(+), 5 deletions(-) diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index 4ca361fdf355..faaa4d3970ea 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -21,6 +21,15 @@ #include "gntdev-common.h" #include "gntdev-dmabuf.h" +#ifndef GRANT_INVALID_REF +/* + * Note on usage of grant reference 0 as invalid grant reference: + * grant reference 0 is valid, but never exposed to a driver, + * because of the fact it is already in use/reserved by the PV console. + */ +#define GRANT_INVALID_REF 0 +#endif + struct gntdev_dmabuf { struct gntdev_dmabuf_priv *priv; struct dma_buf *dmabuf; @@ -35,6 +44,14 @@ struct gntdev_dmabuf { struct gntdev_priv *priv; struct gntdev_grant_map *map; } exp; + struct { + /* Granted references of the imported buffer. */ + grant_ref_t *refs; + /* Scatter-gather table of the imported buffer. */ + struct sg_table *sgt; + /* dma-buf attachment of the imported buffer. 
*/ + struct dma_buf_attachment *attach; + } imp; } u; /* Number of pages this buffer has. */ @@ -59,6 +76,8 @@ struct gntdev_dmabuf_priv { struct list_head exp_list; /* List of wait objects. */ struct list_head exp_wait_list; + /* List of imported DMA buffers. */ + struct list_head imp_list; /* This is the lock which protects dma_buf_xxx lists. */ struct mutex lock; }; @@ -492,21 +511,230 @@ out: /* DMA buffer import support. */ +static int +dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs, + int count, int domid) +{ + grant_ref_t priv_gref_head; + int i, ret; + + ret = gnttab_alloc_grant_references(count, &priv_gref_head); + if (ret < 0) { + pr_debug("Cannot allocate grant references, ret %d\n", ret); + return ret; + } + + for (i = 0; i < count; i++) { + int cur_ref; + + cur_ref = gnttab_claim_grant_reference(&priv_gref_head); + if (cur_ref < 0) { + ret = cur_ref; + pr_debug("Cannot claim grant reference, ret %d\n", ret); + goto out; + } + + gnttab_grant_foreign_access_ref(cur_ref, domid, + xen_page_to_gfn(pages[i]), 0); + refs[i] = cur_ref; + } + + return 0; + +out: + gnttab_free_grant_references(priv_gref_head); + return ret; +} + +static void dmabuf_imp_end_foreign_access(u32 *refs, int count) +{ + int i; + + for (i = 0; i < count; i++) + if (refs[i] != GRANT_INVALID_REF) + gnttab_end_foreign_access(refs[i], 0, 0UL); +} + +static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf) +{ + kfree(gntdev_dmabuf->pages); + kfree(gntdev_dmabuf->u.imp.refs); + kfree(gntdev_dmabuf); +} + +static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count) +{ + struct gntdev_dmabuf *gntdev_dmabuf; + int i; + + gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL); + if (!gntdev_dmabuf) + goto fail; + + gntdev_dmabuf->u.imp.refs = kcalloc(count, + sizeof(gntdev_dmabuf->u.imp.refs[0]), + GFP_KERNEL); + if (!gntdev_dmabuf->u.imp.refs) + goto fail; + + gntdev_dmabuf->pages = kcalloc(count, + sizeof(gntdev_dmabuf->pages[0]), + GFP_KERNEL); + if (!gntdev_dmabuf->pages) + goto fail; + + gntdev_dmabuf->nr_pages = count; + + for (i = 0; i < count; i++) + gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF; + + return gntdev_dmabuf; + +fail: + dmabuf_imp_free_storage(gntdev_dmabuf); + return ERR_PTR(-ENOMEM); +} + static struct gntdev_dmabuf * dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, int fd, int count, int domid) { - return ERR_PTR(-ENOMEM); + struct gntdev_dmabuf *gntdev_dmabuf, *ret; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct sg_page_iter sg_iter; + int i; + + dma_buf = dma_buf_get(fd); + if (IS_ERR(dma_buf)) + return ERR_CAST(dma_buf); + + gntdev_dmabuf = dmabuf_imp_alloc_storage(count); + if (IS_ERR(gntdev_dmabuf)) { + ret = gntdev_dmabuf; + goto fail_put; + } + + gntdev_dmabuf->priv = priv; + gntdev_dmabuf->fd = fd; + + attach = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attach)) { + ret = ERR_CAST(attach); + goto fail_free_obj; + } + + gntdev_dmabuf->u.imp.attach = attach; + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = ERR_CAST(sgt); + goto fail_detach; + } + + /* Check number of pages that imported buffer has. */ + if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) { + ret = ERR_PTR(-EINVAL); + pr_debug("DMA buffer has %zu pages, user-space expects %d\n", + attach->dmabuf->size, gntdev_dmabuf->nr_pages); + goto fail_unmap; + } + + gntdev_dmabuf->u.imp.sgt = sgt; + + /* Now convert sgt to array of pages and check for page validity. 
*/ + i = 0; + for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) { + struct page *page = sg_page_iter_page(&sg_iter); + /* + * Check if page is valid: this can happen if we are given + * a page from VRAM or other resources which are not backed + * by a struct page. + */ + if (!pfn_valid(page_to_pfn(page))) { + ret = ERR_PTR(-EINVAL); + goto fail_unmap; + } + + gntdev_dmabuf->pages[i++] = page; + } + + ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages, + gntdev_dmabuf->u.imp.refs, + count, domid)); + if (IS_ERR(ret)) + goto fail_end_access; + + pr_debug("Imported DMA buffer with fd %d\n", fd); + + mutex_lock(&priv->lock); + list_add(&gntdev_dmabuf->next, &priv->imp_list); + mutex_unlock(&priv->lock); + + return gntdev_dmabuf; + +fail_end_access: + dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count); +fail_unmap: + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); +fail_detach: + dma_buf_detach(dma_buf, attach); +fail_free_obj: + dmabuf_imp_free_storage(gntdev_dmabuf); +fail_put: + dma_buf_put(dma_buf); + return ret; } -static u32 *dmabuf_imp_get_refs(struct gntdev_dmabuf *gntdev_dmabuf) +/* + * Find the hyper dma-buf by its file descriptor and remove + * it from the buffer's list. + */ +static struct gntdev_dmabuf * +dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd) { - return NULL; + struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT); + + mutex_lock(&priv->lock); + list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) { + if (gntdev_dmabuf->fd == fd) { + pr_debug("Found gntdev_dmabuf in the import list\n"); + ret = gntdev_dmabuf; + list_del(&gntdev_dmabuf->next); + break; + } + } + mutex_unlock(&priv->lock); + return ret; } static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd) { - return -EINVAL; + struct gntdev_dmabuf *gntdev_dmabuf; + struct dma_buf_attachment *attach; + struct dma_buf *dma_buf; + + gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd); + if (IS_ERR(gntdev_dmabuf)) + return PTR_ERR(gntdev_dmabuf); + + pr_debug("Releasing DMA buffer with fd %d\n", fd); + + dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, + gntdev_dmabuf->nr_pages); + + attach = gntdev_dmabuf->u.imp.attach; + + if (gntdev_dmabuf->u.imp.sgt) + dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt, + DMA_BIDIRECTIONAL); + dma_buf = attach->dmabuf; + dma_buf_detach(attach->dmabuf, attach); + dma_buf_put(dma_buf); + + dmabuf_imp_free_storage(gntdev_dmabuf); + return 0; } /* DMA buffer IOCTL support. */ @@ -583,7 +811,7 @@ long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv, if (IS_ERR(gntdev_dmabuf)) return PTR_ERR(gntdev_dmabuf); - if (copy_to_user(u->refs, dmabuf_imp_get_refs(gntdev_dmabuf), + if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs, sizeof(*u->refs) * op.count) != 0) { ret = -EFAULT; goto out_release; @@ -617,6 +845,7 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void) mutex_init(&priv->lock); INIT_LIST_HEAD(&priv->exp_list); INIT_LIST_HEAD(&priv->exp_wait_list); + INIT_LIST_HEAD(&priv->imp_list); return priv; } -- cgit v1.2.3-59-g8ed1b From 47b428d14f06dbeab23dd5c7e424e15283841765 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 19 Jul 2018 17:39:57 -0400 Subject: xen/spinlock: Don't use pvqspinlock if only 1 vCPU On a VM with only 1 vCPU, the locking fast paths will always be successful. In this case, there is no need to use the PV qspinlock code which has higher overhead on the unlock side than the native qspinlock code. 
The xen_pvspin variable is also turned off in this 1 vCPU case to eliminate unneeded pvqspinlock initialization in xen_init_lock_cpu() which is run after xen_init_spinlocks(). Signed-off-by: Waiman Long Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- arch/x86/xen/spinlock.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index cd97a62394e7..973f10e05211 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -130,6 +130,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); void __init xen_init_spinlocks(void) { + /* Don't need to use pvqspinlock code if there is only 1 vCPU. */ + if (num_possible_cpus() == 1) + xen_pvspin = false; + if (!xen_pvspin) { printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); return; -- cgit v1.2.3-59-g8ed1b From 2789e83c933d7da583e592d7958e87493c889605 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Jul 2018 15:02:25 +0100 Subject: xen/gntdev: don't dereference a null gntdev_dmabuf on allocation failure Currently, when the allocation of gntdev_dmabuf fails, the error exit path will call dmabuf_imp_free_storage and cause a null pointer dereference on gntdev_dmabuf. Fix this by adding an error exit path that won't free gntdev_dmabuf. Detected by CoverityScan, CID#1472124 ("Dereference after null check") Fixes: bf8dc55b1358 ("xen/gntdev: Implement dma-buf import functionality") Signed-off-by: Colin Ian King Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev-dmabuf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index faaa4d3970ea..589fd923c550 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -569,7 +569,7 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count) gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL); if (!gntdev_dmabuf) - goto fail; + goto fail_no_free; gntdev_dmabuf->u.imp.refs = kcalloc(count, sizeof(gntdev_dmabuf->u.imp.refs[0]), @@ -592,6 +592,7 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count) fail: dmabuf_imp_free_storage(gntdev_dmabuf); +fail_no_free: return ERR_PTR(-ENOMEM); } -- cgit v1.2.3-59-g8ed1b From bf06bad958c4536ee5ac3461a18976ea3e896f26 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sat, 4 Aug 2018 19:50:14 -0500 Subject: xen/biomerge: Use true and false for boolean values Return statements in functions returning bool should use true or false instead of an integer value. This code was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/biomerge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 30d7f52eb7ca..55ed80c3a17c 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c @@ -17,7 +17,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, * XXX: Add support for merging bio_vec when using different page * size in Xen and Linux. */ - return 0; + return false; #endif } EXPORT_SYMBOL(xen_biovec_phys_mergeable); -- cgit v1.2.3-59-g8ed1b From 405c018a25fe464dc68057bbc8014a58f2bd4422 Mon Sep 17 00:00:00 2001 From: "M. 
Vefa Bicakci" Date: Tue, 24 Jul 2018 08:45:47 -0400 Subject: xen/pv: Call get_cpu_address_sizes to set x86_virt/phys_bits Commit d94a155c59c9 ("x86/cpu: Prevent cpuinfo_x86::x86_phys_bits adjustment corruption") has moved the query and calculation of the x86_virt_bits and x86_phys_bits fields of the cpuinfo_x86 struct from the get_cpu_cap function to a new function named get_cpu_address_sizes. One of the call sites related to Xen PV VMs was unfortunately missed in the aforementioned commit. This prevents successful boot-up of kernel versions 4.17 and up in Xen PV VMs if CONFIG_DEBUG_VIRTUAL is enabled, due to the following code path: enlighten_pv.c::xen_start_kernel mmu_pv.c::xen_reserve_special_pages page.h::__pa physaddr.c::__phys_addr physaddr.h::phys_addr_valid phys_addr_valid uses boot_cpu_data.x86_phys_bits to validate physical addresses. boot_cpu_data.x86_phys_bits is no longer populated before the call to xen_reserve_special_pages due to the aforementioned commit though, so the validation performed by phys_addr_valid fails, which causes __phys_addr to trigger a BUG, preventing boot-up. Signed-off-by: M. Vefa Bicakci Reviewed-by: Thomas Gleixner Reviewed-by: Boris Ostrovsky Cc: "Kirill A. Shutemov" Cc: Andy Lutomirski Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Gleixner Cc: Boris Ostrovsky Cc: Juergen Gross Cc: xen-devel@lists.xenproject.org Cc: x86@kernel.org Cc: stable@vger.kernel.org # for v4.17 and up Fixes: d94a155c59c9 ("x86/cpu: Prevent cpuinfo_x86::x86_phys_bits adjustment corruption") Signed-off-by: Boris Ostrovsky --- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/cpu.h | 1 + arch/x86/xen/enlighten_pv.c | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index eb4cb3efd20e..2322d0c4bfd2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -911,7 +911,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) apply_forced_caps(c); } -static void get_cpu_address_sizes(struct cpuinfo_x86 *c) +void get_cpu_address_sizes(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 38216f678fc3..12a5f0cec0b2 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -46,6 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], *const __x86_cpu_dev_end[]; extern void get_cpu_cap(struct cpuinfo_x86 *c); +extern void get_cpu_address_sizes(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern u32 get_scattered_cpuid_leaf(unsigned int level, diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 439a94bf89ad..c5e3f2acc7f0 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1259,6 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void) get_cpu_cap(&boot_cpu_data); x86_configure_nx(); + /* Determine virtual and physical address sizes */ + get_cpu_address_sizes(&boot_cpu_data); + /* Let's presume PV guests always boot on vCPU with id 0. */ per_cpu(xen_vcpu_id, 0) = 0; -- cgit v1.2.3-59-g8ed1b From cd9139220b87f40ea83858de1d6436c639376ce7 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Wed, 13 Jun 2018 11:58:06 +0200 Subject: xen: don't use privcmd_call() from xen_mc_flush() Using privcmd_call() for a singleton multicall seems to be wrong, as privcmd_call() is using stac()/clac() to enable hypervisor access to Linux user space. 
Even if currently not a problem (pv domains can't use SMAP while HVM and PVH domains can't use multicalls), things might change when PVH dom0 support is added to the kernel. Reported-by: Jan Beulich Signed-off-by: Juergen Gross Reviewed-by: Jan Beulich Signed-off-by: Boris Ostrovsky --- arch/x86/include/asm/xen/hypercall.h | 25 +++++++++++++++++++------ arch/x86/xen/multicalls.c | 6 +++--- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index bfd882617613..6b2f90a0b149 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -209,24 +209,37 @@ extern struct { char _entry[32]; } hypercall_page[]; }) static inline long -privcmd_call(unsigned call, - unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, - unsigned long a5) +xen_single_call(unsigned int call, + unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) { __HYPERCALL_DECLS; __HYPERCALL_5ARG(a1, a2, a3, a4, a5); - stac(); asm volatile(CALL_NOSPEC : __HYPERCALL_5PARAM : [thunk_target] "a" (&hypercall_page[call]) : __HYPERCALL_CLOBBER5); - clac(); return (long)__res; } +static inline long +privcmd_call(unsigned int call, + unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + long res; + + stac(); + res = xen_single_call(call, a1, a2, a3, a4, a5); + clac(); + + return res; +} + static inline int HYPERVISOR_set_trap_table(struct trap_info *table) { diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index dc502ca8263e..2bce7958ce8b 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -80,9 +80,9 @@ void xen_mc_flush(void) and just do the call directly. */ mc = &b->entries[0]; - mc->result = privcmd_call(mc->op, - mc->args[0], mc->args[1], mc->args[2], - mc->args[3], mc->args[4]); + mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1], + mc->args[2], mc->args[3], + mc->args[4]); ret = mc->result < 0; break; -- cgit v1.2.3-59-g8ed1b From 3596924a233e45aa918c961a902170fc4916461b Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 8 Aug 2018 13:46:41 +0200 Subject: xen/balloon: fix balloon initialization for PVH Dom0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current balloon code tries to calculate a delta factor for the balloon target when running in HVM mode in order to account for memory used by the firmware. This workaround for memory accounting doesn't work properly on a PVH Dom0, which has a static-max value different from the target value even at startup. Note that this is not a problem for DomUs because guests are started with a static-max value that matches the amount of RAM in the memory map. Fix this by forcefully setting target_diff for Dom0, regardless of its mode. Reported-by: Gabriel Bercarug Signed-off-by: Roger Pau Monné Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/xen-balloon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index b437fccd4e62..294f35ce9e46 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch, static_max = new_target; else static_max >>= PAGE_SHIFT - 10; - target_diff = xen_pv_domain() ? 0 + target_diff = (xen_pv_domain() || xen_initial_domain()) ? 
0 : static_max - balloon_stats.target_pages; } -- cgit v1.2.3-59-g8ed1b