Diffstat (limited to 'mm/vmalloc.c'):
 mm/vmalloc.c | 43 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c5db9a7264d9..11a929872ebd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,7 +14,6 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -24,6 +23,7 @@
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
+#include <linux/bootmem.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long addr;
int purged = 0;
+ BUG_ON(!size);
BUG_ON(size & ~PAGE_MASK);
va = kmalloc_node(sizeof(struct vmap_area),
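The new BUG_ON(!size) closes a hole the existing alignment check misses: a size of zero passes (size & ~PAGE_MASK) unchallenged but would corrupt the allocator's interval arithmetic, and it would also defeat the overflow tests added below. A standalone sketch of the combined sanity check (userspace demo, not kernel code; PAGE_SIZE hard-coded to 4096 for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long sizes[] = { 0, 4096, 8192, 6000 };
        int i;

        for (i = 0; i < 4; i++) {
                unsigned long size = sizes[i];
                /* Reject zero and anything that is not a whole
                 * number of pages (low bits set under ~PAGE_MASK). */
                int bad = !size || (size & ~PAGE_MASK);
                printf("size %5lu -> %s\n", size, bad ? "rejected" : "ok");
        }
        return 0;
}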
@@ -334,6 +335,9 @@ retry:
addr = ALIGN(vstart, align);
spin_lock(&vmap_area_lock);
+ if (addr + size - 1 < addr)
+ goto overflow;
+
/* XXX: could have a last_hole cache */
n = vmap_area_root.rb_node;
if (n) {
@@ -365,6 +369,8 @@ retry:
while (addr + size > first->va_start && addr + size <= vend) {
addr = ALIGN(first->va_end + PAGE_SIZE, align);
+ if (addr + size - 1 < addr)
+ goto overflow;
n = rb_next(&first->rb_node);
if (n)
@@ -375,6 +381,7 @@ retry:
}
found:
if (addr + size > vend) {
+overflow:
spin_unlock(&vmap_area_lock);
if (!purged) {
purge_vmap_area_lazy();
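Both overflow branches use the same unsigned-wraparound test: if addr + size runs past the top of the address space, the modular sum lands below addr, so addr + size - 1 < addr detects it; the "- 1" keeps a range ending exactly at the last address valid, and the test assumes size is nonzero, which the BUG_ON above guarantees. A minimal standalone demonstration (userspace, not kernel code):

#include <stdio.h>

/* Sketch of the wraparound test used in alloc_vmap_area(). */
static int range_wraps(unsigned long addr, unsigned long size)
{
        return addr + size - 1 < addr;
}

int main(void)
{
        printf("%d\n", range_wraps(0x1000UL, 0x1000UL)); /* 0: fits  */
        printf("%d\n", range_wraps(-8192UL, 16384UL));   /* 1: wraps */
        return 0;
}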
@@ -495,9 +502,10 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
int sync, int force_flush)
{
- static DEFINE_MUTEX(purge_lock);
+ static DEFINE_SPINLOCK(purge_lock);
LIST_HEAD(valist);
struct vmap_area *va;
+ struct vmap_area *n_va;
int nr = 0;
/*
@@ -506,10 +514,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
* the case that isn't actually used at the moment anyway.
*/
if (!sync && !force_flush) {
- if (!mutex_trylock(&purge_lock))
+ if (!spin_trylock(&purge_lock))
return;
} else
- mutex_lock(&purge_lock);
+ spin_lock(&purge_lock);
rcu_read_lock();
list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -537,11 +545,11 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
if (nr) {
spin_lock(&vmap_area_lock);
- list_for_each_entry(va, &valist, purge_list)
+ list_for_each_entry_safe(va, n_va, &valist, purge_list)
__free_vmap_area(va);
spin_unlock(&vmap_area_lock);
}
- mutex_unlock(&purge_lock);
+ spin_unlock(&purge_lock);
}
/*
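Two changes land in __purge_vmap_area_lazy(): purge_lock becomes a spinlock again (the critical section only splices list entries, flushes TLBs, and frees areas, none of which sleeps, and the trylock fast path must stay usable where a mutex is not allowed), and the final walk over valist switches to list_for_each_entry_safe(). The safe variant is required because __free_vmap_area() unlinks and frees the entry the cursor is standing on, so the successor must be cached in n_va before the body runs. A standalone sketch of the use-after-free the safe iteration avoids (plain C demo, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL, *cur, *next;
        int i;

        for (i = 0; i < 3; i++) {
                cur = malloc(sizeof(*cur));
                cur->val = i;
                cur->next = head;
                head = cur;
        }

        /* Broken: for (cur = head; cur; cur = cur->next) free(cur);
         * reads cur->next from freed memory.  The safe pattern, as in
         * list_for_each_entry_safe(), caches the successor first: */
        for (cur = head; cur; cur = next) {
                next = cur->next;       /* saved before cur is freed */
                free(cur);
        }
        return 0;
}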
@@ -984,6 +992,8 @@ EXPORT_SYMBOL(vm_map_ram);
void __init vmalloc_init(void)
{
+ struct vmap_area *va;
+ struct vm_struct *tmp;
int i;
for_each_possible_cpu(i) {
@@ -996,12 +1006,22 @@ void __init vmalloc_init(void)
vbq->nr_dirty = 0;
}
+ /* Import existing vmlist entries. */
+ for (tmp = vmlist; tmp; tmp = tmp->next) {
+ va = alloc_bootmem(sizeof(struct vmap_area));
+ va->flags = tmp->flags | VM_VM_AREA;
+ va->va_start = (unsigned long)tmp->addr;
+ va->va_end = va->va_start + tmp->size;
+ __insert_vmap_area(va);
+ }
vmap_initialized = true;
}
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unsigned long end = addr + size;
+
+ flush_cache_vunmap(addr, end);
vunmap_page_range(addr, end);
flush_tlb_kernel_range(addr, end);
}
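The flush_cache_vunmap() added above matters for ordering on architectures with virtually indexed caches: dirty cache lines must be written back while the mapping still exists, the page tables are torn down next, and stale translations leave the TLB last. A commented restatement of the resulting function (the comments are explanatory, not part of the patch):

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
        unsigned long end = addr + size;

        flush_cache_vunmap(addr, end);          /* write back while mapped */
        vunmap_page_range(addr, end);           /* clear page-table entries */
        flush_tlb_kernel_range(addr, end);      /* drop stale translations */
}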
@@ -1096,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
}
EXPORT_SYMBOL_GPL(__get_vm_area);
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end,
+ void *caller)
+{
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ caller);
+}
+
/**
* get_vm_area - reserve a contiguous kernel virtual area
* @size: size of the area
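The new __get_vm_area_caller() variant, defined above, lets a wrapper attribute the reserved area to its own caller rather than to the wrapper itself, which is what the caller field is ultimately reported from. A hypothetical usage sketch (kernel context assumed; my_reserve_area is an invented helper, while VMALLOC_START/VMALLOC_END and __builtin_return_address() are as in the kernel proper):

static void *my_reserve_area(unsigned long size)        /* hypothetical */
{
        struct vm_struct *area;

        /* Attribute the area to whoever called this helper, so the
         * mapping's real origin is recorded, not the helper itself. */
        area = __get_vm_area_caller(size, VM_ALLOC,
                                    VMALLOC_START, VMALLOC_END,
                                    __builtin_return_address(0));
        return area ? area->addr : NULL;
}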
@@ -1239,6 +1267,7 @@ EXPORT_SYMBOL(vfree);
void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
+ might_sleep();
__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
@@ -1258,6 +1287,8 @@ void *vmap(struct page **pages, unsigned int count,
{
struct vm_struct *area;
+ might_sleep();
+
if (count > num_physpages)
return NULL;
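The might_sleep() annotations document that vunmap() and vmap() may block. Unlike the existing BUG_ON(in_interrupt()), which only fires in hard or soft interrupt context, might_sleep() (with sleep-inside-atomic debugging enabled) also warns when these functions are reached with a spinlock held or interrupts disabled, and it does so deterministically rather than only on the runs where a sleep actually happens. A hypothetical misuse the annotation now catches (my_lock and addr are invented for illustration):

spin_lock(&my_lock);            /* atomic context: sleeping is illegal  */
vunmap(addr);                   /* might_sleep() warns here every time, */
spin_unlock(&my_lock);          /* not only if a sleep really occurs    */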