author    Eric Dumazet <dada1@cosmosbay.com>       2006-11-10 12:27:48 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-11-13 07:40:42 -0800
commit    2b4ac44e7c7e16cf9411b81693ff3e604f332bf1 (patch)
tree      af167ed7cf9e76f7b155d1af53a62c5d9c3b03ba
parent    [PATCH] nfsd: fix spurious error return from nfsd_create in async case (diff)
[PATCH] vmalloc: optimization, cleanup, bugfixes
- Reorder 'struct vm_struct' to speed up lookups on CPUs with small cache
  lines: the fields 'next', 'addr' and 'size' should now sit in the same
  cache line.

- One minor cleanup in __get_vm_area_node().

- Bugfixes in vmalloc_user() and vmalloc_32_user(): NULL returns from
  __vmalloc() and __find_vm_area() were not tested.

[akpm@osdl.org: remove redundant BUG_ONs]
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 include/linux/vmalloc.h |  3
 mm/vmalloc.c            | 26
 2 files changed, 15 insertions(+), 14 deletions(-)
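
The struct reordering in the first change targets the linear walk that looks up a vm area by address: that walk only touches 'next', 'addr' and 'size' on each node, so keeping those three fields at the front of the structure lets each visited node cost a single cache-line fetch on CPUs with small cache lines. The standalone sketch below illustrates that access pattern; it is not kernel code, and the names vm_struct_sketch and find_area are invented for illustration only.

/*
 * Simplified, userspace sketch of a vmlist-style lookup (the kind of walk
 * __find_vm_area() performs).  Only next/addr/size are read per node, which
 * is why the patch groups them at the start of the structure.
 */
#include <stdio.h>

struct vm_struct_sketch {
	/* keep next, addr, size together to speed up lookups */
	struct vm_struct_sketch *next;
	void *addr;
	unsigned long size;
	/* colder fields follow and may land in later cache lines */
	unsigned long flags;
	unsigned int nr_pages;
};

static struct vm_struct_sketch *
find_area(struct vm_struct_sketch *list, const void *p)
{
	struct vm_struct_sketch *tmp;

	for (tmp = list; tmp; tmp = tmp->next) {
		/* only the first few fields of each node are needed here */
		if ((const char *)p >= (const char *)tmp->addr &&
		    (const char *)p < (const char *)tmp->addr + tmp->size)
			return tmp;
	}
	return NULL;
}

int main(void)
{
	char buf[64];
	struct vm_struct_sketch a = { .next = NULL, .addr = buf, .size = sizeof(buf) };

	printf("found: %p\n", (void *)find_area(&a, buf + 10));
	return 0;
}
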
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dc9a29d84abc..924e502905d4 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -23,13 +23,14 @@ struct vm_area_struct;
 #endif
 
 struct vm_struct {
+	/* keep next,addr,size together to speedup lookups */
+	struct vm_struct	*next;
 	void			*addr;
 	unsigned long		size;
 	unsigned long		flags;
 	struct page		**pages;
 	unsigned int		nr_pages;
 	unsigned long		phys_addr;
-	struct vm_struct	*next;
 };
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 46606c133e82..7dc6aa745166 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
+	if (unlikely(!size))
 		return NULL;
-	}
 
 	/*
 	 * We always allocate a guard page.
@@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
@@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
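
The vmalloc_user()/vmalloc_32_user() bugfix addresses the case where __vmalloc() returns NULL: the old code passed that NULL straight to __find_vm_area() and then dereferenced the (also NULL) result. Below is a plain userspace analogue of the corrected pattern; it is only a sketch, and alloc_user_buffer() and struct user_buffer are invented stand-ins rather than kernel APIs. Bookkeeping is updated only when the underlying allocation succeeded, and the caller still has to check for NULL.

/*
 * Userspace analogue of the fixed pattern: tag metadata only after the
 * allocation is known to have succeeded.  Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct user_buffer {
	void *addr;
	size_t size;
	int usermap;	/* stands in for the VM_USERMAP flag */
};

static struct user_buffer *alloc_user_buffer(size_t size)
{
	struct user_buffer *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;

	buf->addr = calloc(1, size);	/* zeroed, like __GFP_ZERO */
	if (buf->addr) {
		/* only mark the mapping when the allocation succeeded */
		buf->size = size;
		buf->usermap = 1;
	} else {
		free(buf);
		buf = NULL;
	}
	return buf;
}

int main(void)
{
	struct user_buffer *buf = alloc_user_buffer(4096);

	if (!buf) {	/* callers must still handle failure */
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	memset(buf->addr, 0xab, buf->size);
	free(buf->addr);
	free(buf);
	return 0;
}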