Diffstat (limited to 'arch/arm/xen')
-rw-r--r-- | arch/arm/xen/enlighten.c | 141
-rw-r--r-- | arch/arm/xen/mm.c        |  38
-rw-r--r-- | arch/arm/xen/p2m.c       |   6
3 files changed, 150 insertions, 35 deletions
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 7619fbffcea2..93c8ccbf2982 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -34,6 +34,7 @@
 #include <linux/timekeeping.h>
 #include <linux/timekeeper_internal.h>
 #include <linux/acpi.h>
+#include <linux/virtio_anchor.h>
 
 #include <linux/mm.h>
 
@@ -59,6 +60,10 @@ unsigned long xen_released_pages;
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
 static __read_mostly unsigned int xen_events_irq;
+static __read_mostly phys_addr_t xen_grant_frames;
+
+#define GRANT_TABLE_INDEX	0
+#define EXT_REGION_INDEX	1
 
 uint32_t xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
@@ -300,9 +305,118 @@ static void __init xen_acpi_guest_init(void)
 #endif
 }
 
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+/*
+ * A type-less specific Xen resource which contains extended regions
+ * (unused regions of guest physical address space provided by the hypervisor).
+ */
+static struct resource xen_resource = {
+	.name = "Xen unused space",
+};
+
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+	struct device_node *np;
+	struct resource *regs, *tmp_res;
+	uint64_t min_gpaddr = -1, max_gpaddr = 0;
+	unsigned int i, nr_reg = 0;
+	int rc;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (!acpi_disabled)
+		return -ENODEV;
+
+	np = of_find_compatible_node(NULL, NULL, "xen,xen");
+	if (WARN_ON(!np))
+		return -ENODEV;
+
+	/* Skip region 0 which is reserved for grant table space */
+	while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
+		nr_reg++;
+
+	if (!nr_reg) {
+		pr_err("No extended regions are found\n");
+		of_node_put(np);
+		return -EINVAL;
+	}
+
+	regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
+	if (!regs) {
+		of_node_put(np);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Create resource from extended regions provided by the hypervisor to be
+	 * used as unused address space for Xen scratch pages.
+	 */
+	for (i = 0; i < nr_reg; i++) {
+		rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]);
+		if (rc)
+			goto err;
+
+		if (max_gpaddr < regs[i].end)
+			max_gpaddr = regs[i].end;
+		if (min_gpaddr > regs[i].start)
+			min_gpaddr = regs[i].start;
+	}
+
+	xen_resource.start = min_gpaddr;
+	xen_resource.end = max_gpaddr;
+
+	/*
+	 * Mark holes between extended regions as unavailable. The rest of that
+	 * address space will be available for the allocation.
+	 */
+	for (i = 1; i < nr_reg; i++) {
+		resource_size_t start, end;
+
+		/* There is an overlap between regions */
+		if (regs[i - 1].end + 1 > regs[i].start) {
+			rc = -EINVAL;
+			goto err;
+		}
+
+		/* There is no hole between regions */
+		if (regs[i - 1].end + 1 == regs[i].start)
+			continue;
+
+		start = regs[i - 1].end + 1;
+		end = regs[i].start - 1;
+
+		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+		if (!tmp_res) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		tmp_res->name = "Unavailable space";
+		tmp_res->start = start;
+		tmp_res->end = end;
+
+		rc = insert_resource(&xen_resource, tmp_res);
+		if (rc) {
+			pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
+			kfree(tmp_res);
+			goto err;
+		}
+	}
+
+	*res = &xen_resource;
+
+err:
+	of_node_put(np);
+	kfree(regs);
+	return rc;
+}
+#endif
+
 static void __init xen_dt_guest_init(void)
 {
 	struct device_node *xen_node;
+	struct resource res;
 
 	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!xen_node) {
@@ -311,17 +425,28 @@ static void __init xen_dt_guest_init(void)
 	}
 
 	xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+
+	if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
+		pr_err("Xen grant table region is not found\n");
+		of_node_put(xen_node);
+		return;
+	}
+	of_node_put(xen_node);
+	xen_grant_frames = res.start;
 }
 
 static int __init xen_guest_init(void)
 {
 	struct xen_add_to_physmap xatp;
 	struct shared_info *shared_info_page = NULL;
-	int cpu;
+	int rc, cpu;
 
 	if (!xen_domain())
 		return 0;
 
+	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
+		virtio_set_mem_acc_cb(xen_virtio_mem_acc);
+
 	if (!acpi_disabled)
 		xen_acpi_guest_init();
 	else
@@ -370,12 +495,16 @@ static int __init xen_guest_init(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(xen_vcpu_id, cpu) = cpu;
 
-	xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
-	if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
-					  &xen_auto_xlat_grant_frames.vaddr,
-					  xen_auto_xlat_grant_frames.count)) {
+	if (!xen_grant_frames) {
+		xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
+		rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
+						   &xen_auto_xlat_grant_frames.vaddr,
+						   xen_auto_xlat_grant_frames.count);
+	} else
+		rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
+	if (rc) {
 		free_percpu(xen_vcpu_info);
-		return -ENOMEM;
+		return rc;
 	}
 
 	gnttab_init();
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index a7e54a087b80..3d826c0b5fee 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -23,22 +23,20 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
-unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+static gfp_t xen_swiotlb_gfp(void)
 {
 	phys_addr_t base;
-	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 	u64 i;
 
 	for_each_mem_range(i, &base, NULL) {
 		if (base < (phys_addr_t)0xffffffff) {
 			if (IS_ENABLED(CONFIG_ZONE_DMA32))
-				flags |= __GFP_DMA32;
-			else
-				flags |= __GFP_DMA;
-			break;
+				return __GFP_DMA32;
+			return __GFP_DMA;
 		}
 	}
-	return __get_free_pages(flags, order);
+
+	return GFP_KERNEL;
 }
 
 static bool hypercall_cflush = false;
@@ -118,23 +116,6 @@ bool xen_arch_need_swiotlb(struct device *dev,
 				!dev_is_dma_coherent(dev));
 }
 
-int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
-				 unsigned int address_bits,
-				 dma_addr_t *dma_handle)
-{
-	if (!xen_initial_domain())
-		return -EINVAL;
-
-	/* we assume that dom0 is mapped 1:1 for now */
-	*dma_handle = pstart;
-	return 0;
-}
-
-void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
-{
-	return;
-}
-
 static int __init xen_mm_init(void)
 {
 	struct gnttab_cache_flush cflush;
@@ -143,10 +124,13 @@ static int __init xen_mm_init(void)
 	if (!xen_swiotlb_detect())
 		return 0;
 
-	rc = xen_swiotlb_init();
 	/* we can work with the default swiotlb */
-	if (rc < 0 && rc != -EEXIST)
-		return rc;
+	if (!io_tlb_default_mem.nslabs) {
+		rc = swiotlb_init_late(swiotlb_size_or_default(),
+				       xen_swiotlb_gfp(), NULL);
+		if (rc < 0)
+			return rc;
+	}
 
 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 84a1cea1f43b..309648c17f48 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -63,11 +63,12 @@ out:
 
 unsigned long __pfn_to_mfn(unsigned long pfn)
 {
-	struct rb_node *n = phys_to_mach.rb_node;
+	struct rb_node *n;
 	struct xen_p2m_entry *entry;
 	unsigned long irqflags;
 
 	read_lock_irqsave(&p2m_lock, irqflags);
+	n = phys_to_mach.rb_node;
 	while (n) {
 		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 		if (entry->pfn <= pfn &&
@@ -152,10 +153,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
 	int rc;
 	unsigned long irqflags;
 	struct xen_p2m_entry *p2m_entry;
-	struct rb_node *n = phys_to_mach.rb_node;
+	struct rb_node *n;
 
 	if (mfn == INVALID_P2M_ENTRY) {
 		write_lock_irqsave(&p2m_lock, irqflags);
+		n = phys_to_mach.rb_node;
 		while (n) {
 			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 			if (p2m_entry->pfn <= pfn &&
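
For illustration only: the hole-marking loop added to arch_xen_unpopulated_init() above walks the extended regions, which it assumes are sorted by start address, and registers every gap between neighbouring regions as an "Unavailable space" child resource. The standalone userspace sketch below reproduces just that gap computation; the region addresses are invented for this example and the kernel's insert_resource() call is replaced by a printf().

/*
 * Standalone userspace sketch (not kernel code) of the hole computation
 * performed by the new arch_xen_unpopulated_init(). Region values are
 * hypothetical and chosen only to exercise both branches of the loop.
 */
#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t start;
	uint64_t end;	/* inclusive, like struct resource */
};

int main(void)
{
	/* Hypothetical extended regions, already sorted by start address */
	struct region regs[] = {
		{ 0x40000000, 0x7fffffff },
		{ 0xa0000000, 0xbfffffff },
		{ 0xc0000000, 0xffffffff },
	};
	unsigned int i, nr_reg = sizeof(regs) / sizeof(regs[0]);

	for (i = 1; i < nr_reg; i++) {
		/* Overlapping regions: the kernel code bails out with -EINVAL */
		if (regs[i - 1].end + 1 > regs[i].start) {
			fprintf(stderr, "regions overlap\n");
			return 1;
		}

		/* No hole between adjacent regions: nothing to mark */
		if (regs[i - 1].end + 1 == regs[i].start)
			continue;

		/* This range would become an "Unavailable space" child resource */
		printf("unavailable: [0x%llx-0x%llx]\n",
		       (unsigned long long)(regs[i - 1].end + 1),
		       (unsigned long long)(regs[i].start - 1));
	}
	return 0;
}

With these example regions only one hole, [0x80000000-0x9fffffff], is printed; the second and third regions are adjacent, which corresponds to the continue path in the kernel loop.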