ixgbevf: Update q_vector to contain ring pointers instead of bitmaps
In most cases the ixgbevf driver will only ever contain a single Tx and a single Rx queue. To track those it makes more sense to use a pointer instead of a bitmap which must be searched in order to locate the ring for a given adapter index. As such I am changing the code to use ring pointers and an iterator to access all rings on a given q_vector.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit 6b43c44654
parent 3595990a9c
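The hunks below introduce exactly this pattern. As background, here is a minimal, self-contained sketch of the idea (illustrative names only; `ring`, `ring_container`, and `add_ring` are not the driver's identifiers): each vector keeps the head of a singly linked list of rings, mapping pushes a ring onto the head of that list, and iteration is a plain pointer walk instead of a find_first_bit()/find_next_bit() scan.

#include <stddef.h>

struct ring {
        struct ring *next;      /* link to next ring on the same vector */
        int reg_idx;            /* hardware register index */
};

struct ring_container {
        struct ring *ring;      /* head of the linked list of rings */
        unsigned char count;    /* number of rings attached */
};

/* walk every ring on a container -- O(count), no bitmap scan */
#define for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)

/* attach a ring by inserting it at the head of the list */
static void add_ring(struct ring_container *rc, struct ring *r)
{
        r->next = rc->ring;
        rc->ring = r;
        rc->count++;
}

/* usage sketch: for_each_ring(r, qv->rx) program_ivar(r->reg_idx); */

With a single queue the list is one node, so the common case costs a single pointer dereference.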
@@ -55,6 +55,7 @@ struct ixgbevf_rx_buffer {
 };
 
 struct ixgbevf_ring {
+	struct ixgbevf_ring *next;
 	struct ixgbevf_adapter *adapter; /* backlink */
 	void *desc; /* descriptor ring memory */
 	dma_addr_t dma; /* phys. address of descriptor ring */
@@ -120,18 +121,23 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
+struct ixgbevf_ring_container {
+	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
+	u8 count;			/* total number of rings in vector */
+	u8 itr;				/* current ITR setting for ring */
+};
+
+/* iterator for handling rings in ring container */
+#define ixgbevf_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
 struct ixgbevf_q_vector {
 	struct ixgbevf_adapter *adapter;
 	struct napi_struct napi;
-	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
-	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
-	u8 rxr_count;	/* Rx ring count assigned to this vector */
-	u8 txr_count;	/* Tx ring count assigned to this vector */
-	u8 tx_itr;
-	u8 rx_itr;
+	struct ixgbevf_ring_container rx, tx;
 	u32 eitr;
 	int v_idx;	/* vector index in list */
 };
@@ -526,14 +526,9 @@ static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
 	struct ixgbevf_q_vector *q_vector =
 			       container_of(napi, struct ixgbevf_q_vector, napi);
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	struct ixgbevf_ring *rx_ring = NULL;
 	int work_done = 0;
-	long r_idx;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
-
-	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+	ixgbevf_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);
 
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
@@ -541,7 +536,8 @@ static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
 		if (adapter->itr_setting & 1)
 			ixgbevf_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+			ixgbevf_irq_enable_queues(adapter,
+						  1 << q_vector->v_idx);
 	}
 
 	return work_done;
@@ -560,26 +556,16 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
 	struct ixgbevf_q_vector *q_vector =
 			       container_of(napi, struct ixgbevf_q_vector, napi);
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	struct ixgbevf_ring *rx_ring = NULL;
-	int work_done = 0, i;
-	long r_idx;
-	u64 enable_mask = 0;
+	struct ixgbevf_ring *rx_ring;
+	int work_done = 0;
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
-	budget /= (q_vector->rxr_count ?: 1);
+	budget /= (q_vector->rx.count ?: 1);
 	budget = max(budget, 1);
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
-		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-		enable_mask |= rx_ring->v_idx;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
-	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
+	ixgbevf_for_each_ring(rx_ring, q_vector->rx)
+		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
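A side note on the budget split retained above: `budget /= (q_vector->rx.count ?: 1)` relies on the GNU C `?:` extension, where `a ?: b` evaluates to `a` when `a` is nonzero and to `b` otherwise, so the division cannot be by zero when a vector has no Rx rings. The patch only changes where the count comes from (the ring container instead of rxr_count).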
@@ -587,7 +573,8 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
 		if (adapter->itr_setting & 1)
 			ixgbevf_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-			ixgbevf_irq_enable_queues(adapter, enable_mask);
+			ixgbevf_irq_enable_queues(adapter,
+						  1 << q_vector->v_idx);
 	}
 
 	return work_done;
@@ -605,7 +592,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbevf_q_vector *q_vector;
 	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j, q_vectors, v_idx, r_idx;
+	int q_vectors, v_idx;
 	u32 mask;
 
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -615,33 +602,19 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 	 * corresponding register.
 	 */
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		struct ixgbevf_ring *ring;
 		q_vector = adapter->q_vector[v_idx];
-		/* XXX for_each_set_bit(...) */
-		r_idx = find_first_bit(q_vector->rxr_idx,
-				       adapter->num_rx_queues);
 
-		for (i = 0; i < q_vector->rxr_count; i++) {
-			j = adapter->rx_ring[r_idx].reg_idx;
-			ixgbevf_set_ivar(adapter, 0, j, v_idx);
-			r_idx = find_next_bit(q_vector->rxr_idx,
-					      adapter->num_rx_queues,
-					      r_idx + 1);
-		}
-		r_idx = find_first_bit(q_vector->txr_idx,
-				       adapter->num_tx_queues);
+		ixgbevf_for_each_ring(ring, q_vector->rx)
+			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
 
-		for (i = 0; i < q_vector->txr_count; i++) {
-			j = adapter->tx_ring[r_idx].reg_idx;
-			ixgbevf_set_ivar(adapter, 1, j, v_idx);
-			r_idx = find_next_bit(q_vector->txr_idx,
-					      adapter->num_tx_queues,
-					      r_idx + 1);
-		}
+		ixgbevf_for_each_ring(ring, q_vector->tx)
+			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 
 		/* if this is a tx only vector halve the interrupt rate */
-		if (q_vector->txr_count && !q_vector->rxr_count)
+		if (q_vector->tx.ring && !q_vector->rx.ring)
 			q_vector->eitr = (adapter->eitr_param >> 1);
-		else if (q_vector->rxr_count)
+		else if (q_vector->rx.ring)
 			/* rx only */
 			q_vector->eitr = adapter->eitr_param;
@@ -752,40 +725,32 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u32 new_itr;
 	u8 current_itr, ret_itr;
-	int i, r_idx, v_idx = q_vector->v_idx;
+	int v_idx = q_vector->v_idx;
 	struct ixgbevf_ring *rx_ring, *tx_ring;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = &(adapter->tx_ring[r_idx]);
+	ixgbevf_for_each_ring(tx_ring, q_vector->tx) {
 		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
-					     q_vector->tx_itr,
+					     q_vector->tx.itr,
 					     tx_ring->total_packets,
 					     tx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
-		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-				    q_vector->tx_itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+		q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ?
+				    q_vector->tx.itr - 1 : ret_itr);
 	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+	ixgbevf_for_each_ring(rx_ring, q_vector->rx) {
 		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
-					     q_vector->rx_itr,
+					     q_vector->rx.itr,
 					     rx_ring->total_packets,
 					     rx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
-		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-				    q_vector->rx_itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+		q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ?
+				    q_vector->rx.itr - 1 : ret_itr);
 	}
 
-	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
@@ -861,19 +826,14 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 	struct ixgbevf_q_vector *q_vector = data;
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbevf_ring *tx_ring;
-	int i, r_idx;
 
-	if (!q_vector->txr_count)
+	if (!q_vector->tx.ring)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = &(adapter->tx_ring[r_idx]);
+	ixgbevf_for_each_ring(tx_ring, q_vector->tx) {
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
 		ixgbevf_clean_tx_irq(adapter, tx_ring);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
 	}
 
 	if (adapter->itr_setting & 1)
@@ -893,25 +853,17 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbevf_ring *rx_ring;
-	int r_idx;
-	int i;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+	ixgbevf_for_each_ring(rx_ring, q_vector->rx) {
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
 	}
 
-	if (!q_vector->rxr_count)
+	if (!q_vector->rx.ring)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
 	/* disable interrupts on this vector only */
-	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx);
 	napi_schedule(&q_vector->napi);
 
@@ -931,8 +883,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	set_bit(r_idx, q_vector->rxr_idx);
-	q_vector->rxr_count++;
+	a->rx_ring[r_idx].next = q_vector->rx.ring;
+	q_vector->rx.ring = &a->rx_ring[r_idx];
+	q_vector->rx.count++;
 	a->rx_ring[r_idx].v_idx = 1 << v_idx;
 }
 
@@ -941,8 +894,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	set_bit(t_idx, q_vector->txr_idx);
-	q_vector->txr_count++;
+	a->tx_ring[t_idx].next = q_vector->tx.ring;
+	q_vector->tx.ring = &a->tx_ring[t_idx];
+	q_vector->tx.count++;
 	a->tx_ring[t_idx].v_idx = 1 << v_idx;
 }
 
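Both mapping helpers above build the per-vector ring list by head insertion: the ring being mapped takes the old head as its `next` and then becomes the new head, so iteration visits rings in the reverse of mapping order. For the common single-queue case the list is a single node, which is exactly what the commit message argues for.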
@@ -1026,10 +980,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
 	/* Decrement for Other and TCP Timer vectors */
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+#define SET_HANDLER(_v) (((_v)->rx.ring && (_v)->tx.ring) \
 					  ? &ixgbevf_msix_clean_many : \
-			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
-			  (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+			  (_v)->rx.ring ? &ixgbevf_msix_clean_rx : \
+			  (_v)->tx.ring ? &ixgbevf_msix_clean_tx : \
 			  NULL)
 	for (vector = 0; vector < q_vectors; vector++) {
 		handler = SET_HANDLER(adapter->q_vector[vector]);
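The SET_HANDLER selection above now tests the list heads (`rx.ring`, `tx.ring`) for NULL rather than comparing ring counts; the two are equivalent because mapping a ring always leaves a non-NULL head.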
@@ -1085,10 +1039,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
 
 	for (i = 0; i < q_vectors; i++) {
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
-		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
-		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
-		q_vector->rxr_count = 0;
-		q_vector->txr_count = 0;
+		q_vector->rx.ring = NULL;
+		q_vector->tx.ring = NULL;
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
 		q_vector->eitr = adapter->eitr_param;
 	}
 }
@@ -1365,10 +1319,10 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		struct napi_struct *napi;
 		q_vector = adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
+		if (!q_vector->rx.ring)
 			continue;
 		napi = &q_vector->napi;
-		if (q_vector->rxr_count > 1)
+		if (q_vector->rx.count > 1)
 			napi->poll = &ixgbevf_clean_rxonly_many;
 
 		napi_enable(napi);
@@ -1383,7 +1337,7 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
+		if (!q_vector->rx.ring)
 			continue;
 		napi_disable(&q_vector->napi);
 	}
@@ -2144,7 +2098,7 @@ static void ixgbevf_watchdog(unsigned long data)
 	/* get one bit for every active tx/rx interrupt vector */
 	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
 		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
-		if (qv->rxr_count || qv->txr_count)
+		if (qv->rx.ring || qv->tx.ring)
 			eics |= (1 << i);
 	}
 