Diffstat (limited to 'mm/mempool.c')
-rw-r--r--  mm/mempool.c  35
1 files changed, 14 insertions, 21 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index b014ffeaa413..c9f3d4620428 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -198,31 +198,22 @@ void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
 	void *element;
 	unsigned long flags;
 	DEFINE_WAIT(wait);
-	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+	int gfp_temp;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
+
+	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
+	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
+	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
+
+	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
+
 repeat_alloc:
-	element = pool->alloc(gfp_nowait|__GFP_NOWARN, pool->pool_data);
+
+	element = pool->alloc(gfp_temp, pool->pool_data);
 	if (likely(element != NULL))
 		return element;
 
-	/*
-	 * If the pool is less than 50% full and we can perform effective
-	 * page reclaim then try harder to allocate an element.
-	 */
-	mb();
-	if ((gfp_mask & __GFP_FS) && (gfp_mask != gfp_nowait) &&
-				(pool->curr_nr <= pool->min_nr/2)) {
-		element = pool->alloc(gfp_mask, pool->pool_data);
-		if (likely(element != NULL))
-			return element;
-	}
-
-	/*
-	 * Kick the VM at this point.
-	 */
-	wakeup_bdflush(0);
-
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
 		element = remove_element(pool);
@@ -235,8 +226,10 @@ repeat_alloc:
 	if (!(gfp_mask & __GFP_WAIT))
 		return NULL;
 
+	/* Now start performing page reclaim */
+	gfp_temp = gfp_mask;
 	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-	mb();
+	smp_mb();
 	if (!pool->curr_nr)
 		io_schedule();
 	finish_wait(&pool->wait, &wait);
@@ -257,7 +250,7 @@ void mempool_free(void *element, mempool_t *pool)
 {
 	unsigned long flags;
 
-	mb();
+	smp_mb();
 	if (pool->curr_nr < pool->min_nr) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (pool->curr_nr < pool->min_nr) {
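
For readers following the logic rather than the patch itself, the sketch below mirrors, in plain userspace C, the allocation strategy mempool_alloc() is left with after this change: OR in the fail-fast flags, make one non-sleeping attempt, fall back to the pool's reserve, and only then retry with the caller's full mask. Everything here (sketch_pool, backing_alloc, sketch_mempool_alloc, the SK_* flag values) is an illustrative stand-in rather than kernel code, and the spinlock, the wait queue and the loop back to repeat_alloc are deliberately omitted.

/* Build with: cc -std=c99 -o mempool_sketch mempool_sketch.c */
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in gfp bits; the real ones live in <linux/gfp.h>. */
#define SK_WAIT        0x01u
#define SK_IO          0x02u
#define SK_NOMEMALLOC  0x04u
#define SK_NORETRY     0x08u
#define SK_NOWARN      0x10u

/* A toy pool: a small reserve of preallocated elements. */
struct sketch_pool {
	void *reserve[4];
	int curr_nr;
};

/* Stand-in for pool->alloc(); fails whenever it is not allowed to "wait". */
static void *backing_alloc(unsigned int mask, size_t size)
{
	if (!(mask & SK_WAIT))
		return NULL;	/* simulate the non-sleeping attempt failing */
	return malloc(size);
}

/*
 * The control flow the patch leaves behind: mask in the fail-fast bits,
 * try once without sleeping, fall back to the pool's reserve, and only
 * then retry with the caller's full mask.
 */
static void *sketch_mempool_alloc(struct sketch_pool *pool, unsigned int gfp_mask)
{
	unsigned int gfp_temp;
	void *element;

	gfp_mask |= SK_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= SK_NORETRY;		/* don't loop in the page allocator */
	gfp_mask |= SK_NOWARN;		/* failures are OK */

	gfp_temp = gfp_mask & ~(SK_WAIT | SK_IO);

	element = backing_alloc(gfp_temp, 128);	/* non-sleeping first try */
	if (element)
		return element;

	if (pool->curr_nr > 0)			/* fall back to the reserve */
		return pool->reserve[--pool->curr_nr];

	if (!(gfp_mask & SK_WAIT))		/* caller cannot sleep: give up */
		return NULL;

	/* "Now start performing page reclaim": retry with the full mask. */
	return backing_alloc(gfp_mask, 128);
}

int main(void)
{
	struct sketch_pool pool = { .curr_nr = 0 };
	void *p = sketch_mempool_alloc(&pool, SK_WAIT | SK_IO);

	printf("allocated: %s\n", p ? "yes" : "no");
	free(p);
	return 0;
}

The point of the stripped-down gfp_temp mask is that the first attempt never sleeps or starts I/O, so in the common low-memory case the pool's reserve, not direct reclaim, satisfies the allocation.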