From 97e2bde47f886a317909c8a8f9bd2fcd8ce2f0b0 Mon Sep 17 00:00:00 2001
From: Manfred Spraul
Date: Sun, 1 May 2005 08:58:38 -0700
Subject: [PATCH] add kmalloc_node, inline cleanup

The patch makes the following function calls available to allocate memory
on a specific node without changing the basic operation of the slab
allocator:

 kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int flags, int node);
 kmalloc_node(size_t size, unsigned int flags, int node);

in a similar way to the existing node-blind functions:

 kmem_cache_alloc(kmem_cache_t *cachep, unsigned int flags);
 kmalloc(size, flags);

kmem_cache_alloc_node was changed to pass the flags and the node
information through the existing layers of the slab allocator (which led
to some minor rearrangements). The functions at the lowest layer
(kmem_getpages, cache_grow) are already node aware. Also, __alloc_percpu
can now call kmalloc_node.

Performance measurements (using the pageset localization patch) yield:

w/o patches:
Tasks    jobs/min  jti  jobs/min/task     real      cpu
    1      484.27  100       484.2736    12.02     1.97  Wed Mar 30 20:50:43 2005
  100    25170.83   91       251.7083    23.12   150.10  Wed Mar 30 20:51:06 2005
  200    34601.66   84       173.0083    33.64   294.14  Wed Mar 30 20:51:40 2005
  300    37154.47   86       123.8482    46.99   436.56  Wed Mar 30 20:52:28 2005
  400    39839.82   80        99.5995    58.43   580.46  Wed Mar 30 20:53:27 2005
  500    40036.32   79        80.0726    72.68   728.60  Wed Mar 30 20:54:40 2005
  600    44074.21   79        73.4570    79.23   872.10  Wed Mar 30 20:55:59 2005
  700    44016.60   78        62.8809    92.56  1015.84  Wed Mar 30 20:57:32 2005
  800    40411.05   80        50.5138   115.22  1161.13  Wed Mar 30 20:59:28 2005
  900    42298.56   79        46.9984   123.83  1303.42  Wed Mar 30 21:01:33 2005
 1000    40955.05   80        40.9551   142.11  1441.92  Wed Mar 30 21:03:55 2005

with pageset localization and slab API patches:
Tasks    jobs/min  jti  jobs/min/task     real      cpu
    1      484.19  100       484.1930    12.02     1.98  Wed Mar 30 21:10:18 2005
  100    27428.25   92       274.2825    21.22   149.79  Wed Mar 30 21:10:40 2005
  200    37228.94   86       186.1447    31.27   293.49  Wed Mar 30 21:11:12 2005
  300    41725.42   85       139.0847    41.84   434.10  Wed Mar 30 21:11:54 2005
  400    43032.22   82       107.5805    54.10   582.06  Wed Mar 30 21:12:48 2005
  500    42211.23   83        84.4225    68.94   722.61  Wed Mar 30 21:13:58 2005
  600    40084.49   82        66.8075    87.12   873.11  Wed Mar 30 21:15:25 2005
  700    44169.30   79        63.0990    92.24  1008.77  Wed Mar 30 21:16:58 2005
  800    43097.94   79        53.8724   108.03  1155.88  Wed Mar 30 21:18:47 2005
  900    41846.75   79        46.4964   125.17  1303.38  Wed Mar 30 21:20:52 2005
 1000    40247.85   79        40.2478   144.60  1442.21  Wed Mar 30 21:23:17 2005

Signed-off-by: Christoph Lameter
Signed-off-by: Manfred Spraul
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 45 +++++++++++++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 14 deletions(-)

(limited to 'mm/slab.c')
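A minimal usage sketch of the new interface (illustrative only: the
struct and function names below are invented, not part of this patch;
the pattern mirrors the alloc_arraycache() change in the diff):

	#include <linux/slab.h>		/* kmalloc, kmalloc_node */
	#include <linux/topology.h>	/* cpu_to_node */

	/* Hypothetical per-CPU control block; the name is made up. */
	struct my_ctl {
		int avail;
		int limit;
	};

	static struct my_ctl *alloc_ctl_for(int cpu)
	{
		struct my_ctl *c;

		if (cpu == -1)	/* no CPU affinity known */
			c = kmalloc(sizeof(*c), GFP_KERNEL);
		else		/* back the object with node-local memory */
			c = kmalloc_node(sizeof(*c), GFP_KERNEL,
					cpu_to_node(cpu));
		if (c) {
			c->avail = 0;
			c->limit = 0;
		}
		return c;	/* NULL on allocation failure */
	}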
diff --git a/mm/slab.c b/mm/slab.c
index ec660d85ddd7..771cc09f9f1a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -583,7 +583,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -607,6 +607,12 @@ static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
 	return csizep->cs_cachep;
 }
 
+kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+{
+	return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
+
 /* Cal the num objs, wastage, and bytes left over for a given
    slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size,
 		size_t align, int flags, size_t *left_over,
 		unsigned int *num)
@@ -672,14 +678,11 @@ static struct array_cache *alloc_arraycache(int cpu, int entries,
 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	if (cpu != -1) {
-		kmem_cache_t *cachep;
-		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
-		if (cachep)
-			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
-	}
-	if (!nc)
+	if (cpu == -1)
 		nc = kmalloc(memsize, GFP_KERNEL);
+	else
+		nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
+
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -2361,7 +2364,7 @@ out:
 * and can sleep. And it will allocate memory on the given node, which
 * can improve the performance for cpu bound structures.
 */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
 {
 	int loop;
 	void *objp;
@@ -2393,7 +2396,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 		spin_unlock_irq(&cachep->spinlock);
 
 		local_irq_disable();
-		if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+		if (!cache_grow(cachep, flags, nodeid)) {
 			local_irq_enable();
 			return NULL;
 		}
@@ -2435,6 +2438,16 @@ got_slabp:
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmalloc_node(size_t size, int flags, int node)
+{
+	kmem_cache_t *cachep;
+
+	cachep = kmem_find_general_cachep(size, flags);
+	if (unlikely(cachep == NULL))
+		return NULL;
+	return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
 #endif
 
 /**
@@ -2462,7 +2475,12 @@ void *__kmalloc(size_t size, unsigned int __nocast flags)
 {
 	kmem_cache_t *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	/* If you want to save a few bytes .text space: replace
+	 * __ with kmem_.
+	 * Then kmalloc uses the uninlined functions instead of the inline
+	 * functions.
+	 */
+	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
 	return __cache_alloc(cachep, flags);
@@ -2489,9 +2507,8 @@ void *__alloc_percpu(size_t size, size_t align)
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_possible(i))
 			continue;
-		pdata->ptrs[i] = kmem_cache_alloc_node(
-				kmem_find_general_cachep(size, GFP_KERNEL),
-				cpu_to_node(i));
+		pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+				cpu_to_node(i));
 
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
--
cgit v1.2.3-59-g8ed1b
From fbd568a3e61a7decb8a754ad952aaa5b5c82e9e5 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Sun, 1 May 2005 08:59:04 -0700
Subject: [PATCH] Change synchronize_kernel to _rcu and _sched

This patch changes calls to synchronize_kernel(), deprecated in the
earlier "Deprecate synchronize_kernel, GPL replacement" patch, to
instead call the new synchronize_rcu() and synchronize_sched() APIs.

Signed-off-by: Paul E. McKenney
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/i386/oprofile/nmi_timer_int.c | 2 +-
 arch/ppc64/kernel/HvLpEvent.c      | 2 +-
 drivers/acpi/processor_idle.c      | 2 +-
 drivers/char/ipmi/ipmi_si_intf.c   | 4 ++--
 drivers/input/keyboard/atkbd.c     | 2 +-
 drivers/md/multipath.c             | 2 +-
 drivers/md/raid1.c                 | 2 +-
 drivers/md/raid10.c                | 2 +-
 drivers/md/raid5.c                 | 2 +-
 drivers/md/raid6main.c             | 2 +-
 drivers/net/r8169.c                | 2 +-
 drivers/s390/cio/airq.c            | 4 ++--
 kernel/module.c                    | 2 +-
 kernel/profile.c                   | 2 +-
 mm/slab.c                          | 2 +-
 net/core/dev.c                     | 2 +-
 16 files changed, 18 insertions(+), 18 deletions(-)

(limited to 'mm/slab.c')
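The rule of thumb behind each substitution: synchronize_rcu() pairs with
readers inside rcu_read_lock()/rcu_read_unlock() sections, while
synchronize_sched() waits for code that runs with preemption or
interrupts disabled (interrupt and NMI handlers, as the comments in the
hunks below note). A minimal sketch of the synchronize_rcu() case —
every name here is invented for illustration; only the synchronize_*()
calls come from this patch:

	#include <linux/rcupdate.h>	/* rcu_read_lock, synchronize_rcu */

	/* Invented example: a hook pointer protected by RCU. */
	static int (*my_hook)(void);

	static void call_my_hook(void)
	{
		int (*h)(void);

		rcu_read_lock();	/* read-side critical section */
		h = my_hook;		/* (publication barriers elided) */
		if (h)
			h();
		rcu_read_unlock();
	}

	static void unregister_my_hook(void)
	{
		my_hook = NULL;
		/* Wait until every reader that might still see the old
		 * hook has left its critical section; after this the
		 * hook cannot be running anywhere. */
		synchronize_rcu();
	}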
diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c
index b2e462abf337..c58d0c14f274 100644
--- a/arch/i386/oprofile/nmi_timer_int.c
+++ b/arch/i386/oprofile/nmi_timer_int.c
@@ -36,7 +36,7 @@ static void timer_stop(void)
 {
 	enable_timer_nmi_watchdog();
 	unset_nmi_callback();
-	synchronize_kernel();
+	synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
 
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
index 9802beefa217..f8f19637f73f 100644
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ b/arch/ppc64/kernel/HvLpEvent.c
@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
 			/* We now sleep until all other CPUs have scheduled. This ensures that
 			 * the deletion is seen by all other CPUs, and that the deleted handler
 			 * isn't still running on another CPU when we return. */
-			synchronize_kernel();
+			synchronize_rcu();
 		}
 	}
 	return rc;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 05a17812d521..ff64d333e95f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -838,7 +838,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr)
 
 	/* Fall back to the default idle loop */
 	pm_idle = pm_idle_save;
-	synchronize_kernel();
+	synchronize_sched();  /* Relies on interrupts forcing exit from idle. */
 
 	pr->flags.power = 0;
 	result = acpi_processor_get_power_info(pr);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 29de259a981e..44a7f13c788b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2199,7 +2199,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
 	/* Wait until we know that we are out of any interrupt
 	   handlers might have been running before we freed the
 	   interrupt. */
-	synchronize_kernel();
+	synchronize_sched();
 
 	if (new_smi->si_sm) {
 		if (new_smi->handlers)
@@ -2312,7 +2312,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
 	/* Wait until we know that we are out of any interrupt
 	   handlers might have been running before we freed the
 	   interrupt. */
-	synchronize_kernel();
+	synchronize_sched();
 
 	/* Wait for the timer to stop.  This avoids problems with race
 	   conditions removing the timer here. */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index f7304f0ce542..ff66ed4ee2cd 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -678,7 +678,7 @@ static void atkbd_disconnect(struct serio *serio)
 	atkbd_disable(atkbd);
 
 	/* make sure we don't have a command in flight */
-	synchronize_kernel();
+	synchronize_sched();  /* Allow atkbd_interrupt()s to complete. */
 	flush_scheduled_work();
 
 	device_remove_file(&serio->dev, &atkbd_attr_extra);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c9b134cd1532..1891e4930dcc 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -355,7 +355,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a389394b52f6..83380b5d6593 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b100bfe4fdca..e9dc2876a626 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 52c3a81c4aa7..e96e2a10a9c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 7e30ab29691a..8a33f351e092 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 07e2df09491f..c59507f8a76b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2385,7 +2385,7 @@ core_down:
 	}
 
 	/* Give a racing hard_start_xmit a few cycles to complete. */
-	synchronize_kernel();
+	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
 
 	/*
 	 * And now for the 50k$ question: are IRQ disabled or not ?
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 3720e77b465f..83e6a060668e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -45,7 +45,7 @@ s390_register_adapter_interrupt (adapter_int_handler_t handler)
 	else
 		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
 	if (!ret)
-		synchronize_kernel();
+		synchronize_sched();  /* Allow interrupts to complete. */
 
 	sprintf (dbf_txt, "ret:%d", ret);
 	CIO_TRACE_EVENT (4, dbf_txt);
@@ -65,7 +65,7 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
 		ret = -EINVAL;
 	else {
 		adapter_handler = NULL;
-		synchronize_kernel();
+		synchronize_sched();  /* Allow interrupts to complete. */
 		ret = 0;
 	}
 	sprintf (dbf_txt, "ret:%d", ret);
diff --git a/kernel/module.c b/kernel/module.c
index 2dbfa0773faf..5734ab09d3f9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
 		/* Init routine failed: abort.  Try to protect us from
 		   buggy refcounters. */
 		mod->state = MODULE_STATE_GOING;
-		synchronize_kernel();
+		synchronize_sched();
 		if (mod->unsafe)
 			printk(KERN_ERR "%s: module is now stuck!\n",
 			       mod->name);
diff --git a/kernel/profile.c b/kernel/profile.c
index a66be468c422..0221a50ca867 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
 	WARN_ON(hook != timer_hook);
 	timer_hook = NULL;
 	/* make sure all CPUs see the NULL hook */
-	synchronize_kernel();
+	synchronize_sched();  /* Allow ongoing interrupts to complete. */
 }
 
 EXPORT_SYMBOL_GPL(register_timer_hook);
diff --git a/mm/slab.c b/mm/slab.c
index 771cc09f9f1a..840742641152 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1666,7 +1666,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
 	}
 
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		synchronize_kernel();
+		synchronize_rcu();
 
 	/* no cpu_online check required here since we clear the percpu
 	 * array on cpu offline and set this to NULL.
diff --git a/net/core/dev.c b/net/core/dev.c
index 7bd4cd4502c4..f5f005846fe1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3091,7 +3091,7 @@ void free_netdev(struct net_device *dev)
 
 void synchronize_net(void)
 {
 	might_sleep();
-	synchronize_kernel();
+	synchronize_rcu();
 }
 
 /**
--
cgit v1.2.3-59-g8ed1b
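The five md hunks above share one pattern worth spelling out: unpublish
the pointer, wait a grace period, then check whether any reader grabbed
a reference in the meantime. A self-contained toy version (struct and
function names invented; only the synchronize_rcu() step reflects the
patch):

	#include <linux/rcupdate.h>	/* synchronize_rcu */
	#include <linux/errno.h>	/* EBUSY */
	#include <asm/atomic.h>		/* atomic_t (2.6-era header) */

	/* Toy stand-ins for mdk_rdev_t and its container slot. */
	struct toy_rdev {
		atomic_t nr_pending;	/* I/Os still holding a reference */
	};

	struct toy_slot {
		struct toy_rdev *rdev;	/* readers fetch this pointer */
	};

	static int toy_remove_disk(struct toy_slot *p)
	{
		struct toy_rdev *rdev = p->rdev;

		p->rdev = NULL;		/* new readers can no longer find it */
		synchronize_rcu();	/* wait out readers that already did */
		if (atomic_read(&rdev->nr_pending)) {
			p->rdev = rdev;	/* lost the race: republish, retry later */
			return -EBUSY;
		}
		/* rdev is now unreachable and idle; safe to tear down. */
		return 0;
	}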