staging: qlge: Remove rx_ring.type
This field is redundant; the type can be determined from the index, cq_id. Signed-off-by: Benjamin Poirier <bpoirier@suse.com> Link: https://lore.kernel.org/r/20190927101210.23856-11-bpoirier@suse.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
6f5740b1d3
commit
e4c911a73c
|
@ -1387,15 +1387,6 @@ struct tx_ring {
|
||||||
u64 tx_errors;
|
u64 tx_errors;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
|
||||||
* Type of inbound queue.
|
|
||||||
*/
|
|
||||||
enum {
|
|
||||||
DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
|
|
||||||
TX_Q = 3, /* Handles outbound completions. */
|
|
||||||
RX_Q = 4, /* Handles inbound completions. */
|
|
||||||
};
|
|
||||||
|
|
||||||
struct qlge_page_chunk {
|
struct qlge_page_chunk {
|
||||||
struct page *page;
|
struct page *page;
|
||||||
void *va; /* virt addr including offset */
|
void *va; /* virt addr including offset */
|
||||||
|
@ -1468,7 +1459,6 @@ struct rx_ring {
|
||||||
struct qlge_bq sbq;
|
struct qlge_bq sbq;
|
||||||
|
|
||||||
/* Misc. handler elements. */
|
/* Misc. handler elements. */
|
||||||
u32 type; /* Type of queue, tx, rx. */
|
|
||||||
u32 irq; /* Which vector this ring is assigned. */
|
u32 irq; /* Which vector this ring is assigned. */
|
||||||
u32 cpu; /* Which CPU this should run on. */
|
u32 cpu; /* Which CPU this should run on. */
|
||||||
char name[IFNAMSIZ + 5];
|
char name[IFNAMSIZ + 5];
|
||||||
|
|
|
@ -1730,16 +1730,24 @@ void ql_dump_cqicb(struct cqicb *cqicb)
|
||||||
le16_to_cpu(cqicb->sbq_len));
|
le16_to_cpu(cqicb->sbq_len));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
|
||||||
|
{
|
||||||
|
struct ql_adapter *qdev = rx_ring->qdev;
|
||||||
|
|
||||||
|
if (rx_ring->cq_id < qdev->rss_ring_count)
|
||||||
|
return "RX COMPLETION";
|
||||||
|
else
|
||||||
|
return "TX COMPLETION";
|
||||||
|
};
|
||||||
|
|
||||||
void ql_dump_rx_ring(struct rx_ring *rx_ring)
|
void ql_dump_rx_ring(struct rx_ring *rx_ring)
|
||||||
{
|
{
|
||||||
if (rx_ring == NULL)
|
if (rx_ring == NULL)
|
||||||
return;
|
return;
|
||||||
pr_err("===================== Dumping rx_ring %d ===============\n",
|
pr_err("===================== Dumping rx_ring %d ===============\n",
|
||||||
rx_ring->cq_id);
|
rx_ring->cq_id);
|
||||||
pr_err("Dumping rx_ring %d, type = %s%s%s\n",
|
pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
|
||||||
rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
|
qlge_rx_ring_type_name(rx_ring));
|
||||||
rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
|
|
||||||
rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
|
|
||||||
pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
|
pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
|
||||||
pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
|
pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
|
||||||
pr_err("rx_ring->cq_base_dma = %llx\n",
|
pr_err("rx_ring->cq_base_dma = %llx\n",
|
||||||
|
|
|
@ -2785,14 +2785,10 @@ static void ql_free_rx_buffers(struct ql_adapter *qdev)
|
||||||
|
|
||||||
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
|
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
|
||||||
{
|
{
|
||||||
struct rx_ring *rx_ring;
|
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < qdev->rx_ring_count; i++) {
|
for (i = 0; i < qdev->rss_ring_count; i++)
|
||||||
rx_ring = &qdev->rx_ring[i];
|
ql_update_buffer_queues(&qdev->rx_ring[i]);
|
||||||
if (rx_ring->type != TX_Q)
|
|
||||||
ql_update_buffer_queues(rx_ring);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int qlge_init_bq(struct qlge_bq *bq)
|
static int qlge_init_bq(struct qlge_bq *bq)
|
||||||
|
@ -3071,12 +3067,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
|
||||||
rx_ring->sbq.clean_idx = 0;
|
rx_ring->sbq.clean_idx = 0;
|
||||||
rx_ring->sbq.free_cnt = rx_ring->sbq.len;
|
rx_ring->sbq.free_cnt = rx_ring->sbq.len;
|
||||||
}
|
}
|
||||||
switch (rx_ring->type) {
|
if (rx_ring->cq_id < qdev->rss_ring_count) {
|
||||||
case TX_Q:
|
|
||||||
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
|
|
||||||
cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
|
|
||||||
break;
|
|
||||||
case RX_Q:
|
|
||||||
/* Inbound completion handling rx_rings run in
|
/* Inbound completion handling rx_rings run in
|
||||||
* separate NAPI contexts.
|
* separate NAPI contexts.
|
||||||
*/
|
*/
|
||||||
|
@ -3084,10 +3075,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
|
||||||
64);
|
64);
|
||||||
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
|
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
|
||||||
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
|
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
|
||||||
break;
|
} else {
|
||||||
default:
|
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
|
||||||
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
|
cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
|
||||||
"Invalid rx_ring->type = %d.\n", rx_ring->type);
|
|
||||||
}
|
}
|
||||||
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
|
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
|
||||||
CFG_LCQ, rx_ring->cq_id);
|
CFG_LCQ, rx_ring->cq_id);
|
||||||
|
@ -3444,12 +3434,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
|
||||||
goto err_irq;
|
goto err_irq;
|
||||||
|
|
||||||
netif_err(qdev, ifup, qdev->ndev,
|
netif_err(qdev, ifup, qdev->ndev,
|
||||||
"Hooked intr %d, queue type %s, with name %s.\n",
|
"Hooked intr 0, queue type RX_Q, with name %s.\n",
|
||||||
i,
|
|
||||||
qdev->rx_ring[0].type == DEFAULT_Q ?
|
|
||||||
"DEFAULT_Q" :
|
|
||||||
qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
|
|
||||||
qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
|
|
||||||
intr_context->name);
|
intr_context->name);
|
||||||
}
|
}
|
||||||
intr_context->hooked = 1;
|
intr_context->hooked = 1;
|
||||||
|
@ -4012,7 +3997,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
|
||||||
rx_ring->sbq.type = QLGE_SB;
|
rx_ring->sbq.type = QLGE_SB;
|
||||||
rx_ring->sbq.len = NUM_SMALL_BUFFERS;
|
rx_ring->sbq.len = NUM_SMALL_BUFFERS;
|
||||||
rx_ring->sbq.size = rx_ring->sbq.len * sizeof(__le64);
|
rx_ring->sbq.size = rx_ring->sbq.len * sizeof(__le64);
|
||||||
rx_ring->type = RX_Q;
|
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* Outbound queue handles outbound completions only.
|
* Outbound queue handles outbound completions only.
|
||||||
|
@ -4025,7 +4009,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
|
||||||
rx_ring->lbq.size = 0;
|
rx_ring->lbq.size = 0;
|
||||||
rx_ring->sbq.len = 0;
|
rx_ring->sbq.len = 0;
|
||||||
rx_ring->sbq.size = 0;
|
rx_ring->sbq.size = 0;
|
||||||
rx_ring->type = TX_Q;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
|
Loading…
Reference in New Issue