path: root/net/core
author	Jonathan Lemon <jonathan.lemon@gmail.com>	2019-08-13 10:45:09 -0700
committer	David S. Miller <davem@davemloft.net>	2019-08-15 12:27:36 -0700
commit	8d73f8f23e6b869b726cb01dd4747f56dc88660a (patch)
tree	a19e6d1e6829a6106b80e66a2440960cc2a164fa /net/core
parent	Merge branch 'rds-next' (diff)
download	linux-dev-8d73f8f23e6b869b726cb01dd4747f56dc88660a.tar.xz
	linux-dev-8d73f8f23e6b869b726cb01dd4747f56dc88660a.zip
page_pool: fix logic in __page_pool_get_cached
__page_pool_get_cached() will return NULL when the ring is empty, even if there are pages present in the lookaside cache. It is also possible to refill the cache, and then return a NULL page. Restructure the logic to eliminate both cases.

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/page_pool.c	39
1 file changed, 16 insertions, 23 deletions
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 31187ff55cbc..5bc65587f1c4 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -82,12 +82,9 @@ EXPORT_SYMBOL(page_pool_create);
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
+	bool refill = false;
	struct page *page;

-	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
-		return NULL;
-
	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
@@ -95,27 +92,23 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
-		/* Slower-path: Alloc array empty, time to refill
-		 *
-		 * Open-coded bulk ptr_ring consumer.
-		 *
-		 * Discussion: the ring consumer lock is not really
-		 * needed due to the softirq/NAPI protection, but
-		 * later need the ability to reclaim pages on the
-		 * ring. Thus, keeping the locks.
-		 */
-		spin_lock(&r->consumer_lock);
-		while ((page = __ptr_ring_consume(r))) {
-			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
-				break;
-			pool->alloc.cache[pool->alloc.count++] = page;
-		}
-		spin_unlock(&r->consumer_lock);
-		return page;
+		refill = true;
	}

-	/* Slow-path: Get page from locked ring queue */
-	page = ptr_ring_consume(&pool->ring);
+	/* Quicker fallback, avoid locks when ring is empty */
+	if (__ptr_ring_empty(r))
+		return NULL;
+
+	/* Slow-path: Get page from locked ring queue,
+	 * refill alloc array if requested.
+	 */
+	spin_lock(&r->consumer_lock);
+	page = __ptr_ring_consume(r);
+	if (refill)
+		pool->alloc.count = __ptr_ring_consume_batched(r,
+							pool->alloc.cache,
+							PP_ALLOC_CACHE_REFILL);
+	spin_unlock(&r->consumer_lock);
	return page;
}
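
For readability, here is a sketch of __page_pool_get_cached() as it reads after this patch, stitched together from the two hunks above. The single context line between the hunks (the /* Fast-path */ comment in the upstream file) is assumed unchanged, and the snippet relies on the surrounding page_pool.c definitions (struct page_pool, PP_ALLOC_CACHE_REFILL, the ptr_ring helpers), so it is illustrative rather than standalone-compilable.

static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);
	return page;
}

With this ordering, an empty ring no longer short-circuits the softirq fast path, so pages sitting in the lookaside cache are still handed out when the ring has drained. And because the page returned to the caller is consumed from the ring before the batched refill (whose return value, the number of entries pulled off the ring, becomes the new alloc.count), a successful refill can no longer be followed by a spurious NULL return.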