mt76: remove rx_page_lock
mt76u_alloc_buf() cannot run concurrently: the rx tasklet is stopped when mt76u_submit_rx_buffers() is called, so rx_page_lock is not needed and can be removed.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
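For context, the serialization the change relies on can be sketched roughly as below. This is a minimal illustration, not the driver's code: struct rx_queue_sketch, rx_refill_sketch() and rx_stop_and_refill_sketch() are hypothetical names, and only tasklet_kill(), page_frag_alloc() and struct page_frag_cache are real kernel APIs. The point is that the two users of the rx page_frag_cache (the rx tasklet and the buffer (re)allocation path) never run at the same time, so no lock around the cache is required.

/* Minimal sketch, assuming hypothetical helper names (not mt76 code). */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct rx_queue_sketch {
	struct tasklet_struct rx_tasklet;  /* consumes completed rx URBs */
	struct page_frag_cache rx_page;    /* backs the rx buffers */
	spinlock_t lock;                   /* protects queue indices, not rx_page */
};

/* Allocation path: only ever called while rx_tasklet is not running. */
static int rx_refill_sketch(struct rx_queue_sketch *q, int ndesc, int buf_len)
{
	int i;

	for (i = 0; i < ndesc; i++) {
		void *buf;

		/*
		 * No rx_page_lock needed: the only other user of rx_page is
		 * the rx tasklet, and it has been stopped before this runs.
		 */
		buf = page_frag_alloc(&q->rx_page, buf_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* in the real driver the buffer is handed to the URB scatterlist */
	}

	return 0;
}

static void rx_stop_and_refill_sketch(struct rx_queue_sketch *q, int ndesc)
{
	/* Wait until the rx tasklet has finished and cannot be rescheduled. */
	tasklet_kill(&q->rx_tasklet);

	rx_refill_sketch(q, ndesc, 2048 /* illustrative buffer size */);

	/*
	 * URBs would be resubmitted here; their completion handlers
	 * re-schedule rx_tasklet and rx processing resumes.
	 */
}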
commit 069e2d345c
parent a5ba16eb6d
@@ -138,7 +138,6 @@ struct mt76_queue {
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
 	struct page_frag_cache rx_page;
-	spinlock_t rx_page_lock;
 };
 
 struct mt76_sw_queue {
@@ -292,7 +292,6 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
 	struct urb *urb = buf->urb;
 	int i;
 
-	spin_lock_bh(&q->rx_page_lock);
 	for (i = 0; i < nsgs; i++) {
 		struct page *page;
 		void *data;
@@ -306,7 +305,6 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
 		offset = data - page_address(page);
 		sg_set_page(&urb->sg[i], page, sglen, offset);
 	}
-	spin_unlock_bh(&q->rx_page_lock);
 
 	if (i < nsgs) {
 		int j;
@@ -569,7 +567,6 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
 	if (!usb->mcu.data)
 		return -ENOMEM;
 
-	spin_lock_init(&q->rx_page_lock);
 	spin_lock_init(&q->lock);
 	q->entry = devm_kcalloc(dev->dev,
 				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -597,15 +594,12 @@ static void mt76u_free_rx(struct mt76_dev *dev)
 	for (i = 0; i < q->ndesc; i++)
 		mt76u_buf_free(&q->entry[i].ubuf);
 
-	spin_lock_bh(&q->rx_page_lock);
 	if (!q->rx_page.va)
-		goto out;
+		return;
 
 	page = virt_to_page(q->rx_page.va);
 	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
 	memset(&q->rx_page, 0, sizeof(q->rx_page));
-out:
-	spin_unlock_bh(&q->rx_page_lock);
 }
 
 static void mt76u_stop_rx(struct mt76_dev *dev)
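As a usage note on the mt76u_free_rx() hunk above: once rx is stopped there is no concurrent user of the page_frag_cache, so the teardown can release the remaining page reference with a plain early return instead of the goto/unlock dance. A minimal sketch of that pattern, using the real kernel helpers virt_to_page() and __page_frag_cache_drain() but a hypothetical function name:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Hypothetical name; mirrors the teardown pattern in mt76u_free_rx(). */
static void rx_page_cache_drain_sketch(struct page_frag_cache *rx_page)
{
	struct page *page;

	if (!rx_page->va)
		return;  /* cache was never used, nothing to release */

	/* Drop the references still held by the frag cache in one go. */
	page = virt_to_page(rx_page->va);
	__page_frag_cache_drain(page, rx_page->pagecnt_bias);
	memset(rx_page, 0, sizeof(*rx_page));
}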