page_pool: fix logic in __page_pool_get_cached

__page_pool_get_cached() will return NULL when the ring is
empty, even if there are pages present in the lookaside cache.

It is also possible to refill the cache, and then return a
NULL page.

Restructure the logic to eliminate both cases.
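
Both failure modes are easy to reproduce in a condensed, standalone sketch
of the pre-patch control flow (a userspace mock, not kernel code: the
mock_pool type, the integer "pages", and the helper names are all invented
for illustration, and the softirq/locking details are dropped):

    #include <stdbool.h>
    #include <stdio.h>

    #define REFILL 8    /* stands in for PP_ALLOC_CACHE_REFILL */

    struct mock_pool {
            int ring[16];               /* stands in for the ptr_ring */
            int ring_count;
            int alloc_cache[REFILL];    /* the lookaside cache */
            int alloc_count;
    };

    static bool ring_empty(struct mock_pool *p) { return p->ring_count == 0; }

    static int ring_consume(struct mock_pool *p)
    {
            return ring_empty(p) ? 0 : p->ring[--p->ring_count];
    }

    /* Pre-patch ordering: the ring is checked before the lookaside cache. */
    static int get_cached_old(struct mock_pool *p)
    {
            int page;

            if (ring_empty(p))
                    return 0;           /* case 1: misses pages in alloc_cache */
            if (p->alloc_count)
                    return p->alloc_cache[--p->alloc_count];
            while ((page = ring_consume(p))) {
                    if (p->alloc_count == REFILL)
                            break;
                    p->alloc_cache[p->alloc_count++] = page;
            }
            return page;                /* case 2: 0 after the loop drained the ring */
    }

    int main(void)
    {
            /* Case 1: ring empty, page 42 sitting in the lookaside cache. */
            struct mock_pool a = { .alloc_cache = { 42 }, .alloc_count = 1 };
            /* Case 2: cache empty, pages 7 and 8 in the ring. */
            struct mock_pool b = { .ring = { 7, 8 }, .ring_count = 2 };

            printf("%d %d\n", get_cached_old(&a), get_cached_old(&b));
            /* Prints "0 0": both lookups report empty even though pages exist
             * (and case 2 has quietly refilled b's lookaside cache). */
            return 0;
    }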

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jonathan Lemon, 2019-08-13 10:45:09 -07:00; committed by David S. Miller
parent 4b58c9b127
commit 8d73f8f23e
1 changed file with 16 additions and 23 deletions

net/core/page_pool.c

@@ -82,12 +82,9 @@ EXPORT_SYMBOL(page_pool_create);
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
 	struct ptr_ring *r = &pool->ring;
+	bool refill = false;
 	struct page *page;
 
-	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
-		return NULL;
-
 	/* Test for safe-context, caller should provide this guarantee */
 	if (likely(in_serving_softirq())) {
 		if (likely(pool->alloc.count)) {
@@ -95,27 +92,23 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 			page = pool->alloc.cache[--pool->alloc.count];
 			return page;
 		}
-		/* Slower-path: Alloc array empty, time to refill
-		 *
-		 * Open-coded bulk ptr_ring consumer.
-		 *
-		 * Discussion: the ring consumer lock is not really
-		 * needed due to the softirq/NAPI protection, but
-		 * later need the ability to reclaim pages on the
-		 * ring. Thus, keeping the locks.
-		 */
-		spin_lock(&r->consumer_lock);
-		while ((page = __ptr_ring_consume(r))) {
-			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
-				break;
-			pool->alloc.cache[pool->alloc.count++] = page;
-		}
-		spin_unlock(&r->consumer_lock);
-		return page;
+		refill = true;
 	}
 
-	/* Slow-path: Get page from locked ring queue */
-	page = ptr_ring_consume(&pool->ring);
+	/* Quicker fallback, avoid locks when ring is empty */
+	if (__ptr_ring_empty(r))
+		return NULL;
+
+	/* Slow-path: Get page from locked ring queue,
+	 * refill alloc array if requested.
+	 */
+	spin_lock(&r->consumer_lock);
+	page = __ptr_ring_consume(r);
+	if (refill)
+		pool->alloc.count = __ptr_ring_consume_batched(r,
+							pool->alloc.cache,
+							PP_ALLOC_CACHE_REFILL);
+	spin_unlock(&r->consumer_lock);
 	return page;
 }
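
For comparison, the restructured logic drops into the same mock as a
replacement for get_cached_old() (again only a sketch: get_cached_new is an
invented name, the open-coded refill loop stands in for the kernel's
__ptr_ring_consume_batched(), and refill is kept only to mirror the patch's
structure, since the mock has no softirq distinction):

    /* Post-patch ordering: lookaside cache first, ring-empty test second,
     * then one slow-path section that both consumes the page to return and
     * batch-refills the cache.
     */
    static int get_cached_new(struct mock_pool *p)
    {
            bool refill = false;
            int page;

            if (p->alloc_count)                 /* fixes case 1 */
                    return p->alloc_cache[--p->alloc_count];
            refill = true;

            if (ring_empty(p))                  /* 0 only if truly empty */
                    return 0;

            page = ring_consume(p);             /* this page goes to the caller */
            if (refill)                         /* ...the refill comes after it */
                    while (p->alloc_count < REFILL && !ring_empty(p))
                            p->alloc_cache[p->alloc_count++] = ring_consume(p);
            return page;                        /* fixes case 2 */
    }

Substituting get_cached_new() into the main() above prints "42 8" instead of
"0 0". In the kernel version the batch refill is __ptr_ring_consume_batched(),
which returns the number of entries it consumed, so its result can be assigned
to pool->alloc.count directly; doing the single consume and the batch refill
under one consumer_lock acquisition also keeps the refill from costing an
extra lock round-trip.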