Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
L2 Fwd Offload & 10GbE Intel Driver Updates 2018-07-09

This patch series is meant to allow support for the L2 forward offload, aka
MACVLAN offload, without the need for using ndo_select_queue.

The existing solution currently requires that we use ndo_select_queue in the
transmit path if we want to associate specific Tx queues with a given MACVLAN
interface. In order to get away from this we need to repurpose the tc_to_txq
array and XPS pointer of the MACVLAN interface and use those as a means of
accessing the queues on the lower device. As a result we cannot offload a
device that is configured as multiqueue; however, it doesn't really make sense
to configure a macvlan interface as multiqueue anyway, since it doesn't have a
qdisc of its own in the first place.

The big changes in this set are:
  - Allow the lower device to update the tc_to_txq and XPS map of an offloaded MACVLAN
  - Disable XPS for single queue devices
  - Replace accel_priv with sb_dev in ndo_select_queue
  - Add an sb_dev parameter to the fallback function for ndo_select_queue
  - Consolidate ndo_select_queue functions that appeared to be duplicates
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit e32f55f373
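Before the diff itself, a minimal sketch of the driver-facing change the cover letter describes may help: the opaque `accel_priv` pointer in `.ndo_select_queue` becomes a `struct net_device *sb_dev` (the subordinate MACVLAN device), and the fallback helper gains that device as a third argument. The function name `foo_select_queue` is a placeholder, and whether a driver passes `sb_dev` or `NULL` to the fallback varies per driver in the hunks below.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Old form (before this series):
 *
 * static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
 *                             void *accel_priv,
 *                             select_queue_fallback_t fallback)
 * {
 *         return fallback(dev, skb);
 * }
 */

/* New form (after this series). "foo" is a hypothetical driver. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* Drivers with no special policy just hand the subordinate device
	 * through to the core fallback, which now resolves the macvlan's
	 * tc_to_txq/XPS maps itself.
	 */
	return fallback(dev, skb, sb_dev);
}
```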
@@ -423,7 +423,7 @@ tx_finish:
 static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                   struct sk_buff *skb,
-                                  void *accel_priv,
+                                  struct net_device *sb_dev,
                                   select_queue_fallback_t fallback)
 {
         struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
 }
 
 static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
-                                 void *accel_priv,
+                                 struct net_device *sb_dev,
                                  select_queue_fallback_t fallback)
 {
         struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
         mdata->entropy = opa_vnic_calc_entropy(skb);
         mdata->vl = opa_vnic_get_vl(adapter, skb);
         rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
-                                               accel_priv, fallback);
+                                               sb_dev, fallback);
         skb_pull(skb, sizeof(*mdata));
         return rc;
 }

@@ -4094,7 +4094,8 @@ static inline int bond_slave_override(struct bonding *bond,
 
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
         /* This helper function exists to help dev_pick_tx get the correct
          * destination queue. Using a helper function skips a call to

@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
         u16 qid;
         /* we suspect that this is good for in--kernel network services that
@@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
         if (skb_rx_queue_recorded(skb))
                 qid = skb_get_rx_queue(skb);
         else
-                qid = fallback(dev, skb);
+                qid = fallback(dev, skb, NULL);
 
         return qid;
 }

@@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                    void *accel_priv,
+                                    struct net_device *sb_dev,
                                     select_queue_fallback_t fallback)
 {
         struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
         unsigned int q, port;
 
         if (!netdev_uses_dsa(dev))
-                return fallback(dev, skb);
+                return fallback(dev, skb, NULL);
 
         /* DSA tagging layer will have configured the correct queue */
         q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
         tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
         if (unlikely(!tx_ring))
-                return fallback(dev, skb);
+                return fallback(dev, skb, NULL);
 
         return tx_ring->index;
 }

@@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       void *accel_priv, select_queue_fallback_t fallback)
+                       struct net_device *sb_dev,
+                       select_queue_fallback_t fallback)
 {
         struct bnx2x *bp = netdev_priv(dev);
 
@@ -1932,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
         }
 
         /* select a non-FCoE queue */
-        return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+        return fallback(dev, skb, NULL) %
+               (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)

@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       void *accel_priv, select_queue_fallback_t fallback);
+                       struct net_device *sb_dev,
+                       select_queue_fallback_t fallback);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                         struct bnx2x_fastpath *fp,

@@ -930,7 +930,8 @@ freeout:
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
         int txq;
 
@@ -972,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                 return txq;
         }
 
-        return fallback(dev, skb) % dev->real_num_tx_queues;
+        return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)

@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
 
 static u16
 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                     void *accel_priv, select_queue_fallback_t fallback)
+                     struct net_device *sb_dev,
+                     select_queue_fallback_t fallback)
 {
         struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
         struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
             is_multicast_ether_addr(eth_hdr->h_dest))
                 return 0;
         else
-                return fallback(ndev, skb);
+                return fallback(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {

@@ -5275,6 +5275,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
                              struct ixgbe_fwd_adapter *accel)
 {
+        u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+        int num_tc = netdev_get_num_tc(adapter->netdev);
         struct net_device *vdev = accel->netdev;
         int i, baseq, err;
 
@@ -5286,6 +5288,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
         accel->rx_base_queue = baseq;
         accel->tx_base_queue = baseq;
 
+        /* record configuration for macvlan interface in vdev */
+        for (i = 0; i < num_tc; i++)
+                netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+                                             i, rss_i, baseq + (rss_i * i));
+
         for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
                 adapter->rx_ring[baseq + i]->netdev = vdev;
 
@@ -5310,6 +5317,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 
         netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
 
+        /* unbind the queues and drop the subordinate channel config */
+        netdev_unbind_sb_channel(adapter->netdev, vdev);
+        netdev_set_sb_channel(vdev, 0);
+
         clear_bit(accel->pool, adapter->fwd_bitmask);
         kfree(accel);
 
@@ -8197,26 +8208,26 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
               input, common, ring->queue_index);
 }
 
+#ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv, select_queue_fallback_t fallback)
+                              struct net_device *sb_dev,
+                              select_queue_fallback_t fallback)
 {
-        struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
         struct ixgbe_adapter *adapter;
-        int txq;
-#ifdef IXGBE_FCOE
         struct ixgbe_ring_feature *f;
-#endif
+        int txq;
 
-        if (fwd_adapter) {
-                adapter = netdev_priv(dev);
-                txq = reciprocal_scale(skb_get_hash(skb),
-                                       adapter->num_rx_queues_per_pool);
+        if (sb_dev) {
+                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+                struct net_device *vdev = sb_dev;
 
-                return txq + fwd_adapter->tx_base_queue;
+                txq = vdev->tc_to_txq[tc].offset;
+                txq += reciprocal_scale(skb_get_hash(skb),
+                                        vdev->tc_to_txq[tc].count);
+
+                return txq;
         }
 
-#ifdef IXGBE_FCOE
-
         /*
          * only execute the code below if protocol is FCoE
          * or FIP and we have FCoE enabled on the adapter
@@ -8226,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
         case htons(ETH_P_FIP):
                 adapter = netdev_priv(dev);
 
-                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+                if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                         break;
                 /* fall through */
         default:
-                return fallback(dev, skb);
+                return fallback(dev, skb, sb_dev);
         }
 
         f = &adapter->ring_feature[RING_F_FCOE];
@@ -8242,11 +8253,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
                 txq -= f->indices;
 
         return txq + f->offset;
-#else
-        return fallback(dev, skb);
-#endif
 }
 
+#endif
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                                struct xdp_frame *xdpf)
 {
@@ -8766,6 +8775,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
         /* if we cannot find a free pool then disable the offload */
         netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
         macvlan_release_l2fw_offload(vdev);
+
+        /* unbind the queues and drop the subordinate channel config */
+        netdev_unbind_sb_channel(adapter->netdev, vdev);
+        netdev_set_sb_channel(vdev, 0);
+
         kfree(accel);
 
         return 0;
@@ -9769,6 +9783,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
         if (!macvlan_supports_dest_filter(vdev))
                 return ERR_PTR(-EMEDIUMTYPE);
 
+        /* We need to lock down the macvlan to be a single queue device so that
+         * we can reuse the tc_to_txq field in the macvlan netdev to represent
+         * the queue mapping to our netdev.
+         */
+        if (netif_is_multiqueue(vdev))
+                return ERR_PTR(-ERANGE);
+
         pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
         if (pool == adapter->num_rx_pools) {
                 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9825,6 +9846,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
                 return ERR_PTR(-ENOMEM);
 
         set_bit(pool, adapter->fwd_bitmask);
+        netdev_set_sb_channel(vdev, pool);
         accel->pool = pool;
         accel->netdev = vdev;
 
@@ -9866,6 +9888,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
                 ring->netdev = NULL;
         }
 
+        /* unbind the queues and drop the subordinate channel config */
+        netdev_unbind_sb_channel(pdev, accel->netdev);
+        netdev_set_sb_channel(accel->netdev, 0);
+
         clear_bit(accel->pool, adapter->fwd_bitmask);
         kfree(accel);
 }
@@ -10026,7 +10052,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_open               = ixgbe_open,
         .ndo_stop               = ixgbe_close,
         .ndo_start_xmit         = ixgbe_xmit_frame,
-        .ndo_select_queue       = ixgbe_select_queue,
         .ndo_set_rx_mode        = ixgbe_set_rx_mode,
         .ndo_validate_addr      = eth_validate_addr,
         .ndo_set_mac_address    = ixgbe_set_mac,
@@ -10049,6 +10074,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_poll_controller    = ixgbe_netpoll,
 #endif
 #ifdef IXGBE_FCOE
+        .ndo_select_queue       = ixgbe_select_queue,
         .ndo_fcoe_ddp_setup     = ixgbe_fcoe_ddp_get,
         .ndo_fcoe_ddp_target    = ixgbe_fcoe_ddp_target,
         .ndo_fcoe_ddp_done      = ixgbe_fcoe_ddp_put,

@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
         spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback)
-{
-        /* we are currently only using the first queue */
-        return 0;
-}
-
 static int
 ltq_etop_init(struct net_device *dev)
 {
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
         .ndo_set_mac_address = ltq_etop_set_mac_address,
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_rx_mode = ltq_etop_set_multicast_list,
-        .ndo_select_queue = ltq_etop_select_queue,
+        .ndo_select_queue = dev_pick_tx_zero,
         .ndo_init = ltq_etop_init,
         .ndo_tx_timeout = ltq_etop_tx_timeout,
 };

@@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                         void *accel_priv, select_queue_fallback_t fallback)
+                         struct net_device *sb_dev,
+                         select_queue_fallback_t fallback)
 {
         struct mlx4_en_priv *priv = netdev_priv(dev);
         u16 rings_p_up = priv->num_tx_rings_p_up;
 
         if (netdev_get_num_tc(dev))
-                return fallback(dev, skb);
+                return fallback(dev, skb, NULL);
 
-        return fallback(dev, skb) % rings_p_up;
+        return fallback(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,

@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                         void *accel_priv, select_queue_fallback_t fallback);
+                         struct net_device *sb_dev,
+                         select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
                                struct mlx4_en_rx_alloc *frame,

@@ -865,7 +865,8 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       void *accel_priv, select_queue_fallback_t fallback);
+                       struct net_device *sb_dev,
+                       select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                           struct mlx5e_tx_wqe *wqe, u16 pi);

@@ -111,10 +111,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 #endif
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       void *accel_priv, select_queue_fallback_t fallback)
+                       struct net_device *sb_dev,
+                       select_queue_fallback_t fallback)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
-        int channel_ix = fallback(dev, skb);
+        int channel_ix = fallback(dev, skb, NULL);
         u16 num_channels;
         int up = 0;
 

@@ -1656,7 +1656,8 @@ drop:
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
         /* If skb needs TX timestamp, it is handled in network control queue */
         return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :

@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
         struct vnet_port *port = netdev_priv(dev);
 

@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
         struct vnet *vp = netdev_priv(dev);
         struct vnet_port *port = __tx_port_find(vp, skb);

@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
         return err;
 }
 
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv,
-                              select_queue_fallback_t fallback)
-{
-        return 0;
-}
-
 static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
                           void *type_data)
 {
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
         .ndo_vlan_rx_add_vid    = netcp_rx_add_vid,
         .ndo_vlan_rx_kill_vid   = netcp_rx_kill_vid,
         .ndo_tx_timeout         = netcp_ndo_tx_timeout,
-        .ndo_select_queue       = netcp_select_queue,
+        .ndo_select_queue       = dev_pick_tx_zero,
         .ndo_setup_tc           = netcp_setup_tc,
 };
 

@@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 }
 
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                               void *accel_priv,
+                               struct net_device *sb_dev,
                                select_queue_fallback_t fallback)
 {
         struct net_device_context *ndc = netdev_priv(ndev);
@@ -343,9 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 
                 if (vf_ops->ndo_select_queue)
                         txq = vf_ops->ndo_select_queue(vf_netdev, skb,
-                                                       accel_priv, fallback);
+                                                       sb_dev, fallback);
                 else
-                        txq = fallback(vf_netdev, skb);
+                        txq = fallback(vf_netdev, skb, NULL);
 
                 /* Record the queue selected by VF so that it can be
                  * used for common case where VF has more queues than

@@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
         const struct macvlan_dev *vlan = netdev_priv(dev);
         const struct macvlan_port *port = vlan->port;
         const struct macvlan_dev *dest;
-        void *accel_priv = NULL;
 
         if (vlan->mode == MACVLAN_MODE_BRIDGE) {
                 const struct ethhdr *eth = (void *)skb->data;
@@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
                         return NET_XMIT_SUCCESS;
                 }
         }
-
-        /* For packets that are non-multicast and not bridged we will pass
-         * the necessary information so that the lowerdev can distinguish
-         * the source of the packets via the accel_priv value.
-         */
-        accel_priv = vlan->accel_priv;
 xmit_world:
         skb->dev = vlan->lowerdev;
-        return dev_queue_xmit_accel(skb, accel_priv);
+        return dev_queue_xmit_accel(skb,
+                                    netdev_get_sb_channel(dev) ? dev : NULL);
 }
 
 static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)

@@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
 }
 
 static u16 net_failover_select_queue(struct net_device *dev,
-                                     struct sk_buff *skb, void *accel_priv,
+                                     struct sk_buff *skb,
+                                     struct net_device *sb_dev,
                                      select_queue_fallback_t fallback)
 {
         struct net_failover_info *nfo_info = netdev_priv(dev);
@@ -128,9 +129,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
 
                 if (ops->ndo_select_queue)
                         txq = ops->ndo_select_queue(primary_dev, skb,
-                                                    accel_priv, fallback);
+                                                    sb_dev, fallback);
                 else
-                        txq = fallback(primary_dev, skb);
+                        txq = fallback(primary_dev, skb, NULL);
 
                 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 

@@ -1707,7 +1707,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
         /*
          * This helper function exists to help dev_pick_tx get the correct

@@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
         struct tun_struct *tun = netdev_priv(dev);
         u16 ret;

@@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-                                void *accel_priv, select_queue_fallback_t fallback)
+                                struct net_device *sb_dev,
+                                select_queue_fallback_t fallback)
 {
         skb->priority = cfg80211_classify8021d(skb, NULL);
         return mwifiex_1d_to_wmm_queue[skb->priority];

@@ -148,14 +148,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-                               void *accel_priv,
+                               struct net_device *sb_dev,
                                select_queue_fallback_t fallback)
 {
         struct xenvif *vif = netdev_priv(dev);
         unsigned int size = vif->hash.size;
 
         if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-                return fallback(dev, skb) % dev->real_num_tx_queues;
+                return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 
         xenvif_set_skb_hash(vif, skb);
 

@@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                               void *accel_priv, select_queue_fallback_t fallback)
+                               struct net_device *sb_dev,
+                               select_queue_fallback_t fallback)
 {
         unsigned int num_queues = dev->real_num_tx_queues;
         u32 hash;

@@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
         return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                                void *accel_priv,
-                                select_queue_fallback_t fallback)
-{
-        return (u16)smp_processor_id();
-}
-
 static void xlr_hw_set_mac_addr(struct net_device *ndev)
 {
         struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = {
         .ndo_open = xlr_net_open,
         .ndo_stop = xlr_net_stop,
         .ndo_start_xmit = xlr_net_start_xmit,
-        .ndo_select_queue = xlr_net_select_queue,
+        .ndo_select_queue = dev_pick_tx_cpu_id,
         .ndo_set_mac_address = xlr_net_set_mac_addr,
         .ndo_set_rx_mode = xlr_set_rx_mode,
         .ndo_get_stats64 = xlr_stats,

@@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
         struct adapter *padapter = rtw_netdev_priv(dev);
         struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

@@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
-        , void *accel_priv
-        , select_queue_fallback_t fallback
-)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
         struct adapter *padapter = rtw_netdev_priv(dev);
         struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

@@ -575,6 +575,9 @@ struct netdev_queue {
          * (/sys/class/net/DEV/Q/trans_timeout)
          */
         unsigned long           trans_timeout;
+
+        /* Subordinate device that the queue has been assigned to */
+        struct net_device       *sb_dev;
 /*
  * write-mostly part
  */
@@ -790,7 +793,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-                                       struct sk_buff *skb);
+                                       struct sk_buff *skb,
+                                       struct net_device *sb_dev);
 
 enum tc_setup_type {
         TC_SETUP_QDISC_MQPRIO,
@@ -954,7 +958,8 @@ struct dev_ifalias {
  *      those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv, select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev,
+ *                         select_queue_fallback_t fallback);
  *      Called to decide which queue to use when device supports multiple
  *      transmit queues.
  *
@@ -1226,7 +1231,7 @@ struct net_device_ops {
                                                       netdev_features_t features);
         u16                     (*ndo_select_queue)(struct net_device *dev,
                                                     struct sk_buff *skb,
-                                                    void *accel_priv,
+                                                    struct net_device *sb_dev,
                                                     select_queue_fallback_t fallback);
         void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                        int flags);
@@ -1991,7 +1996,7 @@ struct net_device {
 #ifdef CONFIG_DCB
         const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
-        u8                      num_tc;
+        s16                     num_tc;
         struct netdev_tc_txq    tc_to_txq[TC_MAX_QUEUE];
         u8                      prio_tc_map[TC_BITMASK + 1];
 
@@ -2045,6 +2050,17 @@ int netdev_get_num_tc(struct net_device *dev)
         return dev->num_tc;
 }
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+                              struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+                                 struct net_device *sb_dev,
+                                 u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+        return max_t(int, -dev->num_tc, 0);
+}
+
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                          unsigned int index)
@@ -2089,7 +2105,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                     struct sk_buff *skb,
-                                    void *accel_priv);
+                                    struct net_device *sb_dev);
 
 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
@@ -2553,8 +2569,14 @@ void dev_close(struct net_device *dev);
 void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+                     struct net_device *sb_dev,
+                     select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+                       struct net_device *sb_dev,
+                       select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);

net/core/dev.c (157 changed lines)

@@ -2067,11 +2067,13 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
         int i;
 
+        /* walk through the TCs and see if it falls into any of them */
         for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
                 if ((txq - tc->offset) < tc->count)
                         return i;
         }
 
+        /* didn't find it, just return -1 to indicate no match */
         return -1;
 }
 
@@ -2260,7 +2262,14 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
         unsigned int nr_ids;
 
         if (dev->num_tc) {
+                /* Do not allow XPS on subordinate device directly */
                 num_tc = dev->num_tc;
+                if (num_tc < 0)
+                        return -EINVAL;
+
+                /* If queue belongs to subordinate dev use its map */
+                dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
                 tc = netdev_txq_to_tc(dev, index);
                 if (tc < 0)
                         return -EINVAL;
@@ -2448,11 +2457,25 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 EXPORT_SYMBOL(netif_set_xps_queue);
 
 #endif
+static void netdev_unbind_all_sb_channels(struct net_device *dev)
+{
+        struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+        /* Unbind any subordinate channels */
+        while (txq-- != &dev->_tx[0]) {
+                if (txq->sb_dev)
+                        netdev_unbind_sb_channel(dev, txq->sb_dev);
+        }
+}
+
 void netdev_reset_tc(struct net_device *dev)
 {
 #ifdef CONFIG_XPS
         netif_reset_xps_queues_gt(dev, 0);
 #endif
+        netdev_unbind_all_sb_channels(dev);
+
+        /* Reset TC configuration of device */
         dev->num_tc = 0;
         memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
         memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
@@ -2481,11 +2504,77 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
 #ifdef CONFIG_XPS
         netif_reset_xps_queues_gt(dev, 0);
 #endif
+        netdev_unbind_all_sb_channels(dev);
+
         dev->num_tc = num_tc;
         return 0;
 }
 EXPORT_SYMBOL(netdev_set_num_tc);
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+                              struct net_device *sb_dev)
+{
+        struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+#ifdef CONFIG_XPS
+        netif_reset_xps_queues_gt(sb_dev, 0);
+#endif
+        memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
+        memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
+
+        while (txq-- != &dev->_tx[0]) {
+                if (txq->sb_dev == sb_dev)
+                        txq->sb_dev = NULL;
+        }
+}
+EXPORT_SYMBOL(netdev_unbind_sb_channel);
+
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+                                 struct net_device *sb_dev,
+                                 u8 tc, u16 count, u16 offset)
+{
+        /* Make certain the sb_dev and dev are already configured */
+        if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
+                return -EINVAL;
+
+        /* We cannot hand out queues we don't have */
+        if ((offset + count) > dev->real_num_tx_queues)
+                return -EINVAL;
+
+        /* Record the mapping */
+        sb_dev->tc_to_txq[tc].count = count;
+        sb_dev->tc_to_txq[tc].offset = offset;
+
+        /* Provide a way for Tx queue to find the tc_to_txq map or
+         * XPS map for itself.
+         */
+        while (count--)
+                netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
+
+        return 0;
+}
+EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
+
+int netdev_set_sb_channel(struct net_device *dev, u16 channel)
+{
+        /* Do not use a multiqueue device to represent a subordinate channel */
+        if (netif_is_multiqueue(dev))
+                return -ENODEV;
+
+        /* We allow channels 1 - 32767 to be used for subordinate channels.
+         * Channel 0 is meant to be "native" mode and used only to represent
+         * the main root device. We allow writing 0 to reset the device back
+         * to normal mode after being used as a subordinate channel.
+         */
+        if (channel > S16_MAX)
+                return -EINVAL;
+
+        dev->num_tc = -channel;
+
+        return 0;
+}
+EXPORT_SYMBOL(netdev_set_sb_channel);
+
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -2697,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach);
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  * to be used as a distribution range.
  */
-static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
+static u16 skb_tx_hash(const struct net_device *dev,
+                       const struct net_device *sb_dev,
+                       struct sk_buff *skb)
 {
         u32 hash;
         u16 qoffset = 0;
         u16 qcount = dev->real_num_tx_queues;
 
+        if (dev->num_tc) {
+                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
+                qoffset = sb_dev->tc_to_txq[tc].offset;
+                qcount = sb_dev->tc_to_txq[tc].count;
+        }
+
         if (skb_rx_queue_recorded(skb)) {
                 hash = skb_get_rx_queue(skb);
                 while (unlikely(hash >= qcount))
                         hash -= qcount;
-                return hash;
-        }
-
-        if (dev->num_tc) {
-                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-
-                qoffset = dev->tc_to_txq[tc].offset;
-                qcount = dev->tc_to_txq[tc].count;
+                return hash + qoffset;
         }
 
         return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
@@ -3484,7 +3575,8 @@ static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
 }
 #endif
 
-static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
+                         struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
         struct xps_dev_maps *dev_maps;
@@ -3498,7 +3590,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
         if (!static_key_false(&xps_rxqs_needed))
                 goto get_cpus_map;
 
-        dev_maps = rcu_dereference(dev->xps_rxqs_map);
+        dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
         if (dev_maps) {
                 int tci = sk_rx_queue_get(sk);
 
@@ -3509,7 +3601,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 
 get_cpus_map:
         if (queue_index < 0) {
-                dev_maps = rcu_dereference(dev->xps_cpus_map);
+                dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
                 if (dev_maps) {
                         unsigned int tci = skb->sender_cpu - 1;
 
@@ -3525,17 +3617,36 @@ get_cpus_map:
 #endif
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+                     struct net_device *sb_dev,
+                     select_queue_fallback_t fallback)
+{
+        return 0;
+}
+EXPORT_SYMBOL(dev_pick_tx_zero);
+
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+                       struct net_device *sb_dev,
+                       select_queue_fallback_t fallback)
+{
+        return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+EXPORT_SYMBOL(dev_pick_tx_cpu_id);
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+                            struct net_device *sb_dev)
 {
         struct sock *sk = skb->sk;
         int queue_index = sk_tx_queue_get(sk);
 
+        sb_dev = sb_dev ? : dev;
+
         if (queue_index < 0 || skb->ooo_okay ||
             queue_index >= dev->real_num_tx_queues) {
-                int new_index = get_xps_queue(dev, skb);
+                int new_index = get_xps_queue(dev, sb_dev, skb);
 
                 if (new_index < 0)
-                        new_index = skb_tx_hash(dev, skb);
+                        new_index = skb_tx_hash(dev, sb_dev, skb);
 
                 if (queue_index != new_index && sk &&
                     sk_fullsock(sk) &&
@@ -3550,7 +3661,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                     struct sk_buff *skb,
-                                    void *accel_priv)
+                                    struct net_device *sb_dev)
 {
         int queue_index = 0;
 
@@ -3565,10 +3676,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                 const struct net_device_ops *ops = dev->netdev_ops;
 
                 if (ops->ndo_select_queue)
-                        queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+                        queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
                                                             __netdev_pick_tx);
                 else
-                        queue_index = __netdev_pick_tx(dev, skb);
+                        queue_index = __netdev_pick_tx(dev, skb, sb_dev);
 
                 queue_index = netdev_cap_txqueue(dev, queue_index);
         }
@@ -3580,7 +3691,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 /**
  *      __dev_queue_xmit - transmit a buffer
  *      @skb: buffer to transmit
- *      @accel_priv: private data used for L2 forwarding offload
+ *      @sb_dev: suboordinate device used for L2 forwarding offload
  *
  *      Queue a buffer for transmission to a network device. The caller must
  *      have set the device and priority and built the buffer before calling
@@ -3603,7 +3714,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
  * the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
         struct net_device *dev = skb->dev;
         struct netdev_queue *txq;
@@ -3642,7 +3753,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
         else
                 skb_dst_force(skb);
 
-        txq = netdev_pick_tx(dev, skb, accel_priv);
+        txq = netdev_pick_tx(dev, skb, sb_dev);
         q = rcu_dereference_bh(txq->qdisc);
 
         trace_net_dev_queue(skb);
@@ -3716,9 +3827,9 @@ int dev_queue_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_queue_xmit);
 
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
 {
-        return __dev_queue_xmit(skb, accel_priv);
+        return __dev_queue_xmit(skb, sb_dev);
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 

@@ -1047,13 +1047,30 @@ static ssize_t traffic_class_show(struct netdev_queue *queue,
                                   char *buf)
 {
         struct net_device *dev = queue->dev;
-        int index = get_netdev_queue_index(queue);
-        int tc = netdev_txq_to_tc(dev, index);
+        int index;
+        int tc;
+
+        if (!netif_is_multiqueue(dev))
+                return -ENOENT;
 
+        index = get_netdev_queue_index(queue);
+
+        /* If queue belongs to subordinate dev use its TC mapping */
+        dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
+        tc = netdev_txq_to_tc(dev, index);
         if (tc < 0)
                 return -EINVAL;
 
-        return sprintf(buf, "%u\n", tc);
+        /* We can report the traffic class one of two ways:
+         * Subordinate device traffic classes are reported with the traffic
+         * class first, and then the subordinate class so for example TC0 on
+         * subordinate device 2 will be reported as "0-2". If the queue
+         * belongs to the root device it will be reported with just the
+         * traffic class, so just "0" for TC 0 for example.
+         */
+        return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
+                                 sprintf(buf, "%u\n", tc);
 }
 
 #ifdef CONFIG_XPS
@@ -1214,10 +1231,20 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
         cpumask_var_t mask;
         unsigned long index;
 
+        if (!netif_is_multiqueue(dev))
+                return -ENOENT;
+
         index = get_netdev_queue_index(queue);
 
         if (dev->num_tc) {
+                /* Do not allow XPS on subordinate device directly */
                 num_tc = dev->num_tc;
+                if (num_tc < 0)
+                        return -EINVAL;
+
+                /* If queue belongs to subordinate dev use its map */
+                dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
                 tc = netdev_txq_to_tc(dev, index);
                 if (tc < 0)
                         return -EINVAL;
@@ -1260,6 +1287,9 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
         cpumask_var_t mask;
         int err;
 
+        if (!netif_is_multiqueue(dev))
+                return -ENOENT;
+
         if (!capable(CAP_NET_ADMIN))
                 return -EPERM;
 

@@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                          struct sk_buff *skb,
-                                         void *accel_priv,
+                                         struct net_device *sb_dev,
                                          select_queue_fallback_t fallback)
 {
         return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
@@ -1176,7 +1176,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
                                           struct sk_buff *skb,
-                                          void *accel_priv,
+                                          struct net_device *sb_dev,
                                           select_queue_fallback_t fallback)
 {
         struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
         return po->xmit == packet_direct_xmit;
 }
 
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+                                  struct net_device *sb_dev)
 {
-        return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+        return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
 }
 
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
                                                     __packet_pick_tx_queue);
                 queue_index = netdev_cap_txqueue(dev, queue_index);
         } else {
-                queue_index = __packet_pick_tx_queue(dev, skb);
+                queue_index = __packet_pick_tx_queue(dev, skb, NULL);
         }
 
         return queue_index;