staging: qlge: Refill rx buffers up to multiple of 16

Reading the {s,l}bq_prod_idx registers on a running device, it appears that
the adapter will only use buffers up to prod_idx & 0xfff0. The driver
currently uses fixed-size guard zones (16 for sbq, 32 for lbq - the reason
for this difference is unknown). After the previous patch, this approach no longer
guarantees prod_idx values aligned on multiples of 16. While it appears
that we can write unaligned values to prod_idx without ill effects on
device operation, it makes more sense to change qlge_refill_bq() to refill
up to a limit that corresponds with the device's behavior.

Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Link: https://lore.kernel.org/r/20190927101210.23856-17-bpoirier@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Benjamin Poirier 2019-09-27 19:12:10 +09:00 committed by Greg Kroah-Hartman
parent aec626d209
commit 6e9c52b920
2 changed files with 19 additions and 18 deletions

View File

@@ -1423,6 +1423,9 @@ struct qlge_bq {
__le64 *base_indirect;
dma_addr_t base_indirect_dma;
struct qlge_bq_desc *queue;
/* prod_idx is the index of the first buffer that may NOT be used by
* hw, ie. one after the last. Advanced by sw.
*/
void __iomem *prod_idx_db_reg;
/* next index where sw should refill a buffer for hw */
u16 next_to_use;
@@ -1442,6 +1445,11 @@ struct qlge_bq {
offsetof(struct rx_ring, lbq))); \
})
/* Experience shows that the device ignores the low 4 bits of the tail index.
* Refill up to a x16 multiple.
*/
#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16)
#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
struct rx_ring {

View File

@@ -1114,22 +1114,12 @@ static void qlge_refill_bq(struct qlge_bq *bq)
struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
struct ql_adapter *qdev = rx_ring->qdev;
struct qlge_bq_desc *bq_desc;
int free_count, refill_count;
unsigned int reserved_count;
int refill_count;
int i;
if (bq->type == QLGE_SB)
reserved_count = 16;
else
reserved_count = 32;
free_count = bq->next_to_clean - bq->next_to_use;
if (free_count <= 0)
free_count += QLGE_BQ_LEN;
refill_count = free_count - reserved_count;
/* refill batch size */
if (refill_count < 16)
refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
bq->next_to_use);
if (!refill_count)
return;
i = bq->next_to_use;
@@ -1164,11 +1154,14 @@ static void qlge_refill_bq(struct qlge_bq *bq)
i += QLGE_BQ_LEN;
if (bq->next_to_use != i) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"ring %u %s: updating prod idx = %d.\n",
rx_ring->cq_id, bq_type_name[bq->type], i);
if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"ring %u %s: updating prod idx = %d.\n",
rx_ring->cq_id, bq_type_name[bq->type],
i);
ql_write_db_reg(i, bq->prod_idx_db_reg);
}
bq->next_to_use = i;
ql_write_db_reg(bq->next_to_use, bq->prod_idx_db_reg);
}
}