mt76: use a per rx queue page fragment cache

Using the NAPI or netdev frag cache along with other drivers can lead to
32 KiB pages being held for a long time, despite only being used for
very few page fragments.

This can happen if the driver grabs one or two fragments for rx ring
refill, while other drivers use (and free up) the remaining fragments.
The 32 KiB higher-order page can only be freed once all users have freed
their fragments.

Depending on the traffic patterns, this can waste a lot of memory and
look a lot like a memory leak.
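
A minimal sketch of the failure mode, assuming two consumers refill from the
same shared per-CPU fragment cache (illustrative only; not code from this
driver):

	#include <linux/skbuff.h>

	/* Both fragments are carved out of the same high-order page held
	 * by the shared cache. The page is only freed once *every*
	 * fragment taken from it is released, so one long-lived rx buffer
	 * pins the whole 32 KiB page after all other users freed theirs.
	 */
	static void frag_sharing_example(void)
	{
		void *rx_buf = netdev_alloc_frag(2048);	/* parked in an rx ring */
		void *other  = netdev_alloc_frag(2048);	/* another driver, short-lived */

		skb_free_frag(other);	/* page still pinned by rx_buf */
		skb_free_frag(rx_buf);	/* last fragment freed; page released */
	}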

Signed-off-by: Felix Fietkau <nbd@nbd.name>
3 changed files with 20 additions and 8 deletions

drivers/net/wireless/mediatek/mt76/dma.c

@@ -322,19 +322,13 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 	int offset = q->buf_offset;
 	int idx;
-	void *(*alloc)(unsigned int fragsz);
-
-	if (napi)
-		alloc = napi_alloc_frag;
-	else
-		alloc = netdev_alloc_frag;
 
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf;
 
-		buf = alloc(q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
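
The per-queue pattern above, as a standalone hedged sketch (struct rx_queue
and rx_frag_alloc() are simplified stand-ins, not the driver's actual types):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	struct rx_queue {
		struct page_frag_cache rx_page;	/* owned by this queue alone */
		int buf_size;
	};

	/* GFP_ATOMIC: rx refill may run in NAPI/softirq context where
	 * sleeping is not allowed.
	 */
	static void *rx_frag_alloc(struct rx_queue *q)
	{
		return page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
	}
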
@@ -361,6 +355,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
@@ -373,6 +368,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 		skb_free_frag(buf);
 	} while (1);
 	spin_unlock_bh(&q->lock);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
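
The drain idiom used in the cleanup path, as a hedged standalone helper
(rx_cache_drain() is hypothetical, not part of the patch):

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	static void rx_cache_drain(struct page_frag_cache *nc)
	{
		struct page *page;

		if (!nc->va)	/* cache never allocated a page */
			return;

		page = virt_to_page(nc->va);
		/* pagecnt_bias is the number of references the cache still
		 * holds on its current page; drop them all in one call, and
		 * the page is freed once its refcount reaches zero.
		 */
		__page_frag_cache_drain(page, nc->pagecnt_bias);
		memset(nc, 0, sizeof(*nc));	/* cache is reusable afterwards */
	}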

drivers/net/wireless/mediatek/mt76/mt76.h

@@ -121,6 +121,7 @@ struct mt76_queue {
 	dma_addr_t desc_dma;
 
 	struct sk_buff *rx_head;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {

drivers/net/wireless/mediatek/mt76/usb.c

@@ -275,6 +275,7 @@ static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
 		 int nsgs, int len, int sglen)
 {
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
 	struct urb *urb = buf->urb;
 	int i;
@@ -283,7 +284,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
 		void *data;
 		int offset;
 
-		data = netdev_alloc_frag(len);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!data)
 			break;
@@ -550,10 +551,18 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
 static void mt76u_free_rx(struct mt76_dev *dev)
 {
 	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++)
 		mt76u_buf_free(&q->entry[i].ubuf);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_stop_rx(struct mt76_dev *dev)