ixgbe: fix build err, num_rx_queues is only available with CONFIG_RPS

In the recent support for layer 2 hardware acceleration, I added a
few references to real_num_rx_queues and num_rx_queues, which are
only available with CONFIG_RPS.

The fix is first to remove the unnecessary references to num_rx_queues:
because the hardware offload case is limited to configurations where the
RX and TX queue counts are equal, only a single check is needed. That
remaining check is then wrapped in an #ifdef CONFIG_RPS block.
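
To make the failure mode concrete, here is a minimal standalone sketch of
the pattern (illustrative struct and function names only, not the kernel's
definitions): a field that exists only under a config option must be
referenced only under the same guard, or builds without that option fail.

/* Illustration only -- not kernel code. Build with and without
 * -DCONFIG_RPS to reproduce the class of error the patch fixes.
 */
#include <stdio.h>

struct fake_net_device {
	unsigned int num_tx_queues;
#ifdef CONFIG_RPS
	/* Only present when CONFIG_RPS is enabled. */
	unsigned int num_rx_queues;
#endif
};

static int check_queue_counts(const struct fake_net_device *dev)
{
#ifdef CONFIG_RPS
	/* Compare only when the RX counter actually exists. */
	if (dev->num_rx_queues != dev->num_tx_queues)
		return -1;
#endif
	return 0;
}

int main(void)
{
	struct fake_net_device dev = {
		.num_tx_queues = 4,
#ifdef CONFIG_RPS
		.num_rx_queues = 4,
#endif
	};

	printf("check: %d\n", check_queue_counts(&dev));
	return 0;
}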

The patch that introduced this is here:

commit a6cc0cfa72
Author: John Fastabend <john.r.fastabend@intel.com>
Date:   Wed Nov 6 09:54:46 2013 -0800

    net: Add layer 2 hardware acceleration operations for macvlan devices

Reported-by: kbuild test robot <fengguang.wu@intel.com>
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 1 file changed, 10 insertions(+), 6 deletions(-)

@@ -4164,7 +4164,7 @@ static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
 static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
 {
 	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int rss_i = vadapter->netdev->real_num_rx_queues;
+	int rss_i = adapter->num_rx_queues_per_pool;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u16 pool = vadapter->pool;
 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4315,8 +4315,6 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 	if (err)
 		goto fwd_queue_err;
 
-	queues = min_t(unsigned int,
-		       adapter->num_rx_queues_per_pool, vdev->num_rx_queues);
 	err = netif_set_real_num_rx_queues(vdev, queues);
 	if (err)
 		goto fwd_queue_err;
@@ -7540,9 +7538,15 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
 	int pool, err;
 
+#ifdef CONFIG_RPS
+	if (vdev->num_rx_queues != vdev->num_tx_queues) {
+		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
+			    vdev->name);
+		return ERR_PTR(-EINVAL);
+	}
+#endif
 	/* Check for hardware restriction on number of rx/tx queues */
-	if (vdev->num_rx_queues != vdev->num_tx_queues ||
-	    vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
+	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
 	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
 		netdev_info(pdev,
 			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
@@ -7566,7 +7570,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
 	adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools;
-	adapter->ring_feature[RING_F_RSS].limit = vdev->num_rx_queues;
+	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
 
 	/* Force reinit of ring allocation with VMDQ enabled */
 	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));