Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix refcounting bug for connections in on-packet scheduling mode of
    IPVS, from Julian Anastasov.

 2) Set network header properly in AF_PACKET's packet_snd, from Willem
    de Bruijn.

 3) Fix regressions in 3c59x by converting to generic DMA API. It was
    relying upon the hack that the PCI DMA interfaces would accept NULL
    for EISA devices. From Christoph Hellwig.

 4) Remove RDMA devices before unregistering netdev in QEDE driver,
    from Michal Kalderon.

 5) Use after free in TUN driver ptr_ring usage, from Jason Wang.

 6) Properly check for missing netlink attributes in SMC_PNETID
    requests, from Eric Biggers.

 7) Set DMA mask before performing any DMA operations in vmxnet3
    driver, from Regis Duchesne.

 8) Fix mlx5 build with SMP=n, from Saeed Mahameed.

 9) Classifier fixes in bcm_sf2 driver from Florian Fainelli.

10) Tuntap use after free during release, from Jason Wang.

11) Don't use stack memory in scatterlists in tls code, from Matt
    Mullins.

12) Not fully initialized flow key object in ipv4 routing code, from
    David Ahern.

13) Various packet headroom bug fixes in ip6_gre driver, from Petr
    Machata.

14) Remove queues from XPS maps using correct index, from Amritha
    Nambiar.

15) Fix use after free in sock_diag, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (64 commits)
  net: ip6_gre: fix tunnel metadata device sharing.
  cxgb4: fix offset in collecting TX rate limit info
  net: sched: red: avoid hashing NULL child
  sock_diag: fix use-after-free read in __sk_free
  sh_eth: Change platform check to CONFIG_ARCH_RENESAS
  net: dsa: Do not register devlink for unused ports
  net: Fix a bug in removing queues from XPS map
  bpf: fix truncated jump targets on heavy expansions
  bpf: parse and verdict prog attach may race with bpf map update
  bpf: sockmap update rollback on error can incorrectly dec prog refcnt
  net: test tailroom before appending to linear skb
  net: ip6_gre: Fix ip6erspan hlen calculation
  net: ip6_gre: Split up ip6gre_changelink()
  net: ip6_gre: Split up ip6gre_newlink()
  net: ip6_gre: Split up ip6gre_tnl_change()
  net: ip6_gre: Split up ip6gre_tnl_link_config()
  net: ip6_gre: Fix headroom request in ip6erspan_tunnel_xmit()
  net: ip6_gre: Request headroom in __gre6_xmit()
  selftests/bpf: check return value of fopen in test_verifier.c
  erspan: fix invalid erspan version.
  ...
commit 5aef268ace
@@ -57,6 +57,13 @@ KSZ9031:
- txd2-skew-ps : Skew control of TX data 2 pad
- txd3-skew-ps : Skew control of TX data 3 pad

- micrel,force-master:
Boolean, force phy to master mode. Only set this option if the phy
reference clock provided at CLK125_NDO pin is used as MAC reference
clock because the clock jitter in slave mode is to high (errata#2).
Attention: The link partner must be configurable as slave otherwise
no link will be established.

Examples:

mdio {
@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
|
|||
/* Locate the first rule available */
|
||||
if (fs->location == RX_CLS_LOC_ANY)
|
||||
rule_index = find_first_zero_bit(priv->cfp.used,
|
||||
bcm_sf2_cfp_rule_size(priv));
|
||||
priv->num_cfp_rules);
|
||||
else
|
||||
rule_index = fs->location;
|
||||
|
||||
if (rule_index > bcm_sf2_cfp_rule_size(priv))
|
||||
return -ENOSPC;
|
||||
|
||||
layout = &udf_tcpip4_layout;
|
||||
/* We only use one UDF slice for now */
|
||||
slice_num = bcm_sf2_get_slice_number(layout, 0);
|
||||
|
@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
|
|||
* first half because the HW search is by incrementing addresses.
|
||||
*/
|
||||
if (fs->location == RX_CLS_LOC_ANY)
|
||||
rule_index[0] = find_first_zero_bit(priv->cfp.used,
|
||||
bcm_sf2_cfp_rule_size(priv));
|
||||
rule_index[1] = find_first_zero_bit(priv->cfp.used,
|
||||
priv->num_cfp_rules);
|
||||
else
|
||||
rule_index[0] = fs->location;
|
||||
rule_index[1] = fs->location;
|
||||
if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
|
||||
return -ENOSPC;
|
||||
|
||||
/* Flag it as used (cleared on error path) such that we can immediately
|
||||
* obtain a second one to chain from.
|
||||
*/
|
||||
set_bit(rule_index[0], priv->cfp.used);
|
||||
set_bit(rule_index[1], priv->cfp.used);
|
||||
|
||||
rule_index[1] = find_first_zero_bit(priv->cfp.used,
|
||||
bcm_sf2_cfp_rule_size(priv));
|
||||
if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
|
||||
rule_index[0] = find_first_zero_bit(priv->cfp.used,
|
||||
priv->num_cfp_rules);
|
||||
if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
|
||||
ret = -ENOSPC;
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
|
|||
/* Flag the second half rule as being used now, return it as the
|
||||
* location, and flag it as unique while dumping rules
|
||||
*/
|
||||
set_bit(rule_index[1], priv->cfp.used);
|
||||
set_bit(rule_index[0], priv->cfp.used);
|
||||
set_bit(rule_index[1], priv->cfp.unique);
|
||||
fs->location = rule_index[1];
|
||||
|
||||
return ret;
|
||||
|
||||
out_err:
|
||||
clear_bit(rule_index[0], priv->cfp.used);
|
||||
clear_bit(rule_index[1], priv->cfp.used);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
|
|||
int ret;
|
||||
u32 reg;
|
||||
|
||||
/* Refuse deletion of unused rules, and the default reserved rule */
|
||||
if (!test_bit(loc, priv->cfp.used) || loc == 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* Indicate which rule we want to read */
|
||||
bcm_sf2_cfp_rule_addr_set(priv, loc);
|
||||
|
||||
|
@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
|
|||
u32 next_loc = 0;
|
||||
int ret;
|
||||
|
||||
/* Refuse deleting unused rules, and those that are not unique since
|
||||
* that could leave IPv6 rules with one of the chained rule in the
|
||||
* table.
|
||||
*/
|
||||
if (!test_bit(loc, priv->cfp.unique) || loc == 0)
|
||||
return -EINVAL;
|
||||
|
||||
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
|
|||
vp->mii.reg_num_mask = 0x1f;
|
||||
|
||||
/* Makes sure rings are at least 16 byte aligned. */
|
||||
vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
|
||||
vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
|
||||
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
|
||||
&vp->rx_ring_dma);
|
||||
&vp->rx_ring_dma, GFP_KERNEL);
|
||||
retval = -ENOMEM;
|
||||
if (!vp->rx_ring)
|
||||
goto free_device;
|
||||
|
@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
|
|||
return 0;
|
||||
|
||||
free_ring:
|
||||
pci_free_consistent(pdev,
|
||||
sizeof(struct boom_rx_desc) * RX_RING_SIZE
|
||||
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
|
||||
vp->rx_ring,
|
||||
vp->rx_ring_dma);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(struct boom_rx_desc) * RX_RING_SIZE +
|
||||
sizeof(struct boom_tx_desc) * TX_RING_SIZE,
|
||||
vp->rx_ring, vp->rx_ring_dma);
|
||||
free_device:
|
||||
free_netdev(dev);
|
||||
pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
|
||||
|
@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
|
|||
break; /* Bad news! */
|
||||
|
||||
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
|
||||
dma = pci_map_single(VORTEX_PCI(vp), skb->data,
|
||||
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
|
||||
dma = dma_map_single(vp->gendev, skb->data,
|
||||
PKT_BUF_SZ, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(vp->gendev, dma))
|
||||
break;
|
||||
vp->rx_ring[i].addr = cpu_to_le32(dma);
|
||||
}
|
||||
|
@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if (vp->bus_master) {
|
||||
/* Set the bus-master controller to transfer the packet. */
|
||||
int len = (skb->len + 3) & ~3;
|
||||
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
|
||||
PCI_DMA_TODEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
|
||||
vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
|
||||
dev_kfree_skb_any(skb);
|
||||
dev->stats.tx_dropped++;
|
||||
return NETDEV_TX_OK;
|
||||
|
@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
|
||||
|
||||
if (!skb_shinfo(skb)->nr_frags) {
|
||||
dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
|
||||
PCI_DMA_TODEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
|
||||
dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(vp->gendev, dma_addr))
|
||||
goto out_dma_err;
|
||||
|
||||
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
|
||||
|
@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
} else {
|
||||
int i;
|
||||
|
||||
dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
|
||||
skb_headlen(skb), PCI_DMA_TODEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
|
||||
dma_addr = dma_map_single(vp->gendev, skb->data,
|
||||
skb_headlen(skb), DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(vp->gendev, dma_addr))
|
||||
goto out_dma_err;
|
||||
|
||||
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
|
||||
|
@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
|
||||
dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
|
||||
dma_addr = skb_frag_dma_map(vp->gendev, frag,
|
||||
0,
|
||||
frag->size,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
|
||||
if (dma_mapping_error(vp->gendev, dma_addr)) {
|
||||
for(i = i-1; i >= 0; i--)
|
||||
dma_unmap_page(&VORTEX_PCI(vp)->dev,
|
||||
dma_unmap_page(vp->gendev,
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
pci_unmap_single(VORTEX_PCI(vp),
|
||||
dma_unmap_single(vp->gendev,
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[0].length),
|
||||
PCI_DMA_TODEVICE);
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
goto out_dma_err;
|
||||
}
|
||||
|
@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
}
|
||||
}
|
||||
#else
|
||||
dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
|
||||
dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(vp->gendev, dma_addr))
|
||||
goto out_dma_err;
|
||||
vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
|
||||
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
|
||||
|
@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
out:
|
||||
return NETDEV_TX_OK;
|
||||
out_dma_err:
|
||||
dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
|
||||
dev_err(vp->gendev, "Error mapping dma buffer\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
|
|||
if (status & DMADone) {
|
||||
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
|
||||
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
|
||||
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
|
||||
dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
|
||||
pkts_compl++;
|
||||
bytes_compl += vp->tx_skb->len;
|
||||
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
|
||||
|
@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
|
|||
struct sk_buff *skb = vp->tx_skbuff[entry];
|
||||
#if DO_ZEROCOPY
|
||||
int i;
|
||||
pci_unmap_single(VORTEX_PCI(vp),
|
||||
dma_unmap_single(vp->gendev,
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
|
||||
PCI_DMA_TODEVICE);
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
|
||||
pci_unmap_page(VORTEX_PCI(vp),
|
||||
dma_unmap_page(vp->gendev,
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
|
||||
le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
|
||||
PCI_DMA_TODEVICE);
|
||||
DMA_TO_DEVICE);
|
||||
#else
|
||||
pci_unmap_single(VORTEX_PCI(vp),
|
||||
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
|
||||
dma_unmap_single(vp->gendev,
|
||||
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
|
||||
#endif
|
||||
pkts_compl++;
|
||||
bytes_compl += skb->len;
|
||||
|
@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
|
|||
/* 'skb_put()' points to the start of sk_buff data area. */
|
||||
if (vp->bus_master &&
|
||||
! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
|
||||
dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
|
||||
pkt_len, PCI_DMA_FROMDEVICE);
|
||||
dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
|
||||
pkt_len, DMA_FROM_DEVICE);
|
||||
iowrite32(dma, ioaddr + Wn7_MasterAddr);
|
||||
iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
|
||||
iowrite16(StartDMAUp, ioaddr + EL3_CMD);
|
||||
while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
|
||||
;
|
||||
pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
|
||||
dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
|
||||
} else {
|
||||
ioread32_rep(ioaddr + RX_FIFO,
|
||||
skb_put(skb, pkt_len),
|
||||
|
@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
|
|||
if (pkt_len < rx_copybreak &&
|
||||
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
|
||||
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
|
||||
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
|
||||
/* 'skb_put()' points to the start of sk_buff data area. */
|
||||
skb_put_data(skb, vp->rx_skbuff[entry]->data,
|
||||
pkt_len);
|
||||
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
|
||||
vp->rx_copy++;
|
||||
} else {
|
||||
/* Pre-allocate the replacement skb. If it or its
|
||||
|
@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
|
|||
dev->stats.rx_dropped++;
|
||||
goto clear_complete;
|
||||
}
|
||||
newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
|
||||
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
|
||||
newdma = dma_map_single(vp->gendev, newskb->data,
|
||||
PKT_BUF_SZ, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(vp->gendev, newdma)) {
|
||||
dev->stats.rx_dropped++;
|
||||
consume_skb(newskb);
|
||||
goto clear_complete;
|
||||
|
@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
|
|||
vp->rx_skbuff[entry] = newskb;
|
||||
vp->rx_ring[entry].addr = cpu_to_le32(newdma);
|
||||
skb_put(skb, pkt_len);
|
||||
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
|
||||
vp->rx_nocopy++;
|
||||
}
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
|
@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
|
|||
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
|
||||
for (i = 0; i < RX_RING_SIZE; i++)
|
||||
if (vp->rx_skbuff[i]) {
|
||||
pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
|
||||
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
|
||||
dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
|
||||
PKT_BUF_SZ, DMA_FROM_DEVICE);
|
||||
dev_kfree_skb(vp->rx_skbuff[i]);
|
||||
vp->rx_skbuff[i] = NULL;
|
||||
}
|
||||
|
@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
|
|||
int k;
|
||||
|
||||
for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
|
||||
pci_unmap_single(VORTEX_PCI(vp),
|
||||
dma_unmap_single(vp->gendev,
|
||||
le32_to_cpu(vp->tx_ring[i].frag[k].addr),
|
||||
le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
|
||||
PCI_DMA_TODEVICE);
|
||||
DMA_TO_DEVICE);
|
||||
#else
|
||||
pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
|
||||
dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
|
||||
#endif
|
||||
dev_kfree_skb(skb);
|
||||
vp->tx_skbuff[i] = NULL;
|
||||
|
@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
|
|||
|
||||
pci_iounmap(pdev, vp->ioaddr);
|
||||
|
||||
pci_free_consistent(pdev,
|
||||
sizeof(struct boom_rx_desc) * RX_RING_SIZE
|
||||
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
|
||||
vp->rx_ring,
|
||||
vp->rx_ring_dma);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(struct boom_rx_desc) * RX_RING_SIZE +
|
||||
sizeof(struct boom_tx_desc) * TX_RING_SIZE,
|
||||
vp->rx_ring, vp->rx_ring_dma);
|
||||
|
||||
pci_release_regions(pdev);
|
||||
|
||||
|
|
|
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */

#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
#if defined(CONFIG_MACH_TX49XX)
# define DCR_VAL 0x48 /* 8-bit mode */
#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
#else
# define DCR_VAL 0x49
@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
|
|||
{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
|
||||
{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
|
||||
{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
|
||||
{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
|
||||
{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
|
||||
{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
|
||||
{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
|
||||
{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
|
||||
{0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
|
||||
{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
|
||||
{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
|
||||
{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
|
||||
{0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
|
||||
{0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
|
||||
{0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
|
||||
{0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
|
||||
{0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
|
||||
{0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
|
||||
{0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
|
||||
{0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
|
||||
{0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
|
||||
};
|
||||
|
||||
static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
|
||||
|
@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
|
|||
{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
|
||||
{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
|
||||
{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
|
||||
{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
|
||||
{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
|
||||
{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
|
||||
{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
|
||||
{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
|
||||
{0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
|
||||
{0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
|
||||
{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
|
||||
{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
|
||||
{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
|
||||
};
|
||||
|
||||
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
|
||||
|
|
|
@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
|
|||
{
|
||||
struct tp_params *tp = &adap->params.tp;
|
||||
u64 hash_filter_mask = tp->hash_filter_mask;
|
||||
u32 mask;
|
||||
u64 ntuple_mask = 0;
|
||||
|
||||
if (!is_hashfilter(adap))
|
||||
return false;
|
||||
|
@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
|
|||
if (!fs->val.fport || fs->mask.fport != 0xffff)
|
||||
return false;
|
||||
|
||||
if (tp->fcoe_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
|
||||
if (mask && !fs->mask.fcoe)
|
||||
return false;
|
||||
}
|
||||
/* calculate tuple mask and compare with mask configured in hw */
|
||||
if (tp->fcoe_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
|
||||
|
||||
if (tp->port_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
|
||||
if (mask && !fs->mask.iport)
|
||||
return false;
|
||||
}
|
||||
if (tp->port_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
|
||||
|
||||
if (tp->vnic_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
|
||||
|
||||
if ((adap->params.tp.ingress_config & VNIC_F)) {
|
||||
if (mask && !fs->mask.pfvf_vld)
|
||||
return false;
|
||||
} else {
|
||||
if (mask && !fs->mask.ovlan_vld)
|
||||
return false;
|
||||
}
|
||||
if ((adap->params.tp.ingress_config & VNIC_F))
|
||||
ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
|
||||
else
|
||||
ntuple_mask |= (u64)fs->mask.ovlan_vld <<
|
||||
tp->vnic_shift;
|
||||
}
|
||||
|
||||
if (tp->vlan_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
|
||||
if (mask && !fs->mask.ivlan)
|
||||
return false;
|
||||
}
|
||||
if (tp->vlan_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
|
||||
|
||||
if (tp->tos_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
|
||||
if (mask && !fs->mask.tos)
|
||||
return false;
|
||||
}
|
||||
if (tp->tos_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
|
||||
|
||||
if (tp->protocol_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
|
||||
if (mask && !fs->mask.proto)
|
||||
return false;
|
||||
}
|
||||
if (tp->protocol_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
|
||||
|
||||
if (tp->ethertype_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->ethertype_shift) &
|
||||
FT_ETHERTYPE_W;
|
||||
if (mask && !fs->mask.ethtype)
|
||||
return false;
|
||||
}
|
||||
if (tp->ethertype_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
|
||||
|
||||
if (tp->macmatch_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
|
||||
if (mask && !fs->mask.macidx)
|
||||
return false;
|
||||
}
|
||||
if (tp->macmatch_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
|
||||
|
||||
if (tp->matchtype_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
|
||||
|
||||
if (tp->frag_shift >= 0)
|
||||
ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
|
||||
|
||||
if (ntuple_mask != hash_filter_mask)
|
||||
return false;
|
||||
|
||||
if (tp->matchtype_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->matchtype_shift) &
|
||||
FT_MPSHITTYPE_W;
|
||||
if (mask && !fs->mask.matchtype)
|
||||
return false;
|
||||
}
|
||||
if (tp->frag_shift >= 0) {
|
||||
mask = (hash_filter_mask >> tp->frag_shift) &
|
||||
FT_FRAGMENTATION_W;
|
||||
if (mask && !fs->mask.frag)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
|
|||
if (adapter->fw_done_rc) {
|
||||
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
|
||||
adapter->fw_done_rc);
|
||||
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
|
@ -1821,9 +1822,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
ibmvnic_disable_irqs(adapter);
|
||||
}
|
||||
|
||||
ibmvnic_disable_irqs(adapter);
|
||||
adapter->state = VNIC_CLOSED;
|
||||
|
||||
if (reset_state == VNIC_CLOSED)
|
||||
|
@ -4586,14 +4586,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
|
|||
release_crq_queue(adapter);
|
||||
}
|
||||
|
||||
rc = init_stats_buffers(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = init_stats_token(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -4662,13 +4654,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
|
|||
goto ibmvnic_init_fail;
|
||||
} while (rc == EAGAIN);
|
||||
|
||||
rc = init_stats_buffers(adapter);
|
||||
if (rc)
|
||||
goto ibmvnic_init_fail;
|
||||
|
||||
rc = init_stats_token(adapter);
|
||||
if (rc)
|
||||
goto ibmvnic_stats_fail;
|
||||
|
||||
netdev->mtu = adapter->req_mtu - ETH_HLEN;
|
||||
netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
|
||||
netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
|
||||
|
||||
rc = device_create_file(&dev->dev, &dev_attr_failover);
|
||||
if (rc)
|
||||
goto ibmvnic_init_fail;
|
||||
goto ibmvnic_dev_file_err;
|
||||
|
||||
netif_carrier_off(netdev);
|
||||
rc = register_netdev(netdev);
|
||||
|
@ -4687,6 +4687,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
|
|||
ibmvnic_register_fail:
|
||||
device_remove_file(&dev->dev, &dev_attr_failover);
|
||||
|
||||
ibmvnic_dev_file_err:
|
||||
release_stats_token(adapter);
|
||||
|
||||
ibmvnic_stats_fail:
|
||||
release_stats_buffers(adapter);
|
||||
|
||||
ibmvnic_init_fail:
|
||||
release_sub_crqs(adapter, 1);
|
||||
release_crq_queue(adapter);
|
||||
|
|
|
@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
|
|||
mlx4_err(dev, "Failed to create file for port %d\n", port);
|
||||
devlink_port_unregister(&info->devlink_port);
|
||||
info->port = -1;
|
||||
return err;
|
||||
}
|
||||
|
||||
sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
|
||||
|
@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
|
|||
&info->port_attr);
|
||||
devlink_port_unregister(&info->devlink_port);
|
||||
info->port = -1;
|
||||
return err;
|
||||
}
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
|
||||
|
|
|
@@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

start = mem;
while (mem - start + 8 < nfp_cpp_area_size(area)) {
while (mem - start + 8 <= nfp_cpp_area_size(area)) {
u8 __iomem *value;
u32 type, length;

@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
struct qed_ll2_tx_packet *p_pkt = NULL;
|
||||
struct qed_ll2_info *p_ll2_conn;
|
||||
struct qed_ll2_tx_queue *p_tx;
|
||||
unsigned long flags = 0;
|
||||
dma_addr_t tx_frag;
|
||||
|
||||
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
|
||||
|
@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
|
||||
p_tx = &p_ll2_conn->tx_queue;
|
||||
|
||||
spin_lock_irqsave(&p_tx->lock, flags);
|
||||
while (!list_empty(&p_tx->active_descq)) {
|
||||
p_pkt = list_first_entry(&p_tx->active_descq,
|
||||
struct qed_ll2_tx_packet, list_entry);
|
||||
|
@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
list_del(&p_pkt->list_entry);
|
||||
b_last_packet = list_empty(&p_tx->active_descq);
|
||||
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
|
||||
spin_unlock_irqrestore(&p_tx->lock, flags);
|
||||
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
|
||||
struct qed_ooo_buffer *p_buffer;
|
||||
|
||||
|
@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
b_last_frag,
|
||||
b_last_packet);
|
||||
}
|
||||
spin_lock_irqsave(&p_tx->lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&p_tx->lock, flags);
|
||||
}
|
||||
|
||||
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
|
||||
|
@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
struct qed_ll2_info *p_ll2_conn = NULL;
|
||||
struct qed_ll2_rx_packet *p_pkt = NULL;
|
||||
struct qed_ll2_rx_queue *p_rx;
|
||||
unsigned long flags = 0;
|
||||
|
||||
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
|
||||
if (!p_ll2_conn)
|
||||
|
@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
|
||||
p_rx = &p_ll2_conn->rx_queue;
|
||||
|
||||
spin_lock_irqsave(&p_rx->lock, flags);
|
||||
while (!list_empty(&p_rx->active_descq)) {
|
||||
p_pkt = list_first_entry(&p_rx->active_descq,
|
||||
struct qed_ll2_rx_packet, list_entry);
|
||||
if (!p_pkt)
|
||||
break;
|
||||
|
||||
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
|
||||
spin_unlock_irqrestore(&p_rx->lock, flags);
|
||||
|
||||
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
|
||||
struct qed_ooo_buffer *p_buffer;
|
||||
|
@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
|||
cookie,
|
||||
rx_buf_addr, b_last);
|
||||
}
|
||||
spin_lock_irqsave(&p_rx->lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&p_rx->lock, flags);
|
||||
}
|
||||
|
||||
static bool
|
||||
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
|
||||
struct core_rx_slow_path_cqe *p_cqe)
|
||||
{
|
||||
struct ooo_opaque *iscsi_ooo;
|
||||
u32 cid;
|
||||
|
||||
if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
|
||||
return false;
|
||||
|
||||
iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
|
||||
if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
|
||||
return false;
|
||||
|
||||
/* Need to make a flush */
|
||||
cid = le32_to_cpu(iscsi_ooo->cid);
|
||||
qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
|
||||
|
@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
|
|||
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
|
||||
cqe_type = cqe->rx_cqe_sp.type;
|
||||
|
||||
if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
|
||||
if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
|
||||
&cqe->rx_cqe_sp))
|
||||
continue;
|
||||
|
||||
if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
|
||||
DP_NOTICE(p_hwfn,
|
||||
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
|
||||
|
@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
|
|||
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
|
||||
int rc;
|
||||
|
||||
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
|
||||
return 0;
|
||||
|
||||
rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
|
|||
u16 new_idx = 0, num_bds = 0;
|
||||
int rc;
|
||||
|
||||
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
|
||||
return 0;
|
||||
|
||||
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
|
||||
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
|
||||
|
||||
|
@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
|
|||
|
||||
/* Stop Tx & Rx of connection, if needed */
|
||||
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
|
||||
p_ll2_conn->tx_queue.b_cb_registred = false;
|
||||
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
|
||||
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
qed_ll2_txq_flush(p_hwfn, connection_handle);
|
||||
qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
|
||||
}
|
||||
|
||||
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
|
||||
p_ll2_conn->rx_queue.b_cb_registred = false;
|
||||
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
|
||||
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
qed_ll2_rxq_flush(p_hwfn, connection_handle);
|
||||
qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
|
||||
}
|
||||
|
||||
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
|
||||
|
@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
|
|||
if (!p_ll2_conn)
|
||||
return;
|
||||
|
||||
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
|
||||
p_ll2_conn->rx_queue.b_cb_registred = false;
|
||||
qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
|
||||
}
|
||||
|
||||
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
|
||||
p_ll2_conn->tx_queue.b_cb_registred = false;
|
||||
qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
|
||||
}
|
||||
|
||||
kfree(p_ll2_conn->tx_queue.descq_mem);
|
||||
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
|
||||
|
||||
|
|
|
@@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)

DP_INFO(edev, "Starting qede_remove\n");

qede_rdma_dev_remove(edev);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);

qede_ptp_disable(edev);

qede_rdma_dev_remove(edev);

edev->ops->common->set_power_state(cdev, PCI_D0);

pci_set_drvdata(pdev, NULL);
@@ -163,7 +163,7 @@ enum {
};

/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
#define SH_ETH_RX_ALIGN 32
#else
#define SH_ETH_RX_ALIGN 2
@@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused,
break;

case NETDEV_CHANGEADDR:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
}
break;

case NETDEV_PRE_TYPE_CHANGE:
@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev)
|
|||
ksz9031_of_load_skew_values(phydev, of_node,
|
||||
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
|
||||
tx_data_skews, 4);
|
||||
|
||||
/* Silicon Errata Sheet (DS80000691D or DS80000692D):
|
||||
* When the device links in the 1000BASE-T slave mode only,
|
||||
* the optional 125MHz reference output clock (CLK125_NDO)
|
||||
* has wide duty cycle variation.
|
||||
*
|
||||
* The optional CLK125_NDO clock does not meet the RGMII
|
||||
* 45/55 percent (min/max) duty cycle requirement and therefore
|
||||
* cannot be used directly by the MAC side for clocking
|
||||
* applications that have setup/hold time requirements on
|
||||
* rising and falling clock edges.
|
||||
*
|
||||
* Workaround:
|
||||
* Force the phy to be the master to receive a stable clock
|
||||
* which meets the duty cycle requirement.
|
||||
*/
|
||||
if (of_property_read_bool(of_node, "micrel,force-master")) {
|
||||
result = phy_read(phydev, MII_CTRL1000);
|
||||
if (result < 0)
|
||||
goto err_force_master;
|
||||
|
||||
/* enable master mode, config & prefer master */
|
||||
result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
|
||||
result = phy_write(phydev, MII_CTRL1000, result);
|
||||
if (result < 0)
|
||||
goto err_force_master;
|
||||
}
|
||||
}
|
||||
|
||||
return ksz9031_center_flp_timing(phydev);
|
||||
|
||||
err_force_master:
|
||||
phydev_err(phydev, "failed to force the phy to master mode\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
|
||||
|
|
|
@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile)
|
|||
skb_queue_purge(&tfile->sk.sk_error_queue);
|
||||
}
|
||||
|
||||
static void tun_cleanup_tx_ring(struct tun_file *tfile)
|
||||
{
|
||||
if (tfile->tx_ring.queue) {
|
||||
ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
|
||||
xdp_rxq_info_unreg(&tfile->xdp_rxq);
|
||||
memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
|
||||
}
|
||||
}
|
||||
|
||||
static void __tun_detach(struct tun_file *tfile, bool clean)
|
||||
{
|
||||
struct tun_file *ntfile;
|
||||
|
@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
|
|||
tun->dev->reg_state == NETREG_REGISTERED)
|
||||
unregister_netdevice(tun->dev);
|
||||
}
|
||||
tun_cleanup_tx_ring(tfile);
|
||||
if (tun)
|
||||
xdp_rxq_info_unreg(&tfile->xdp_rxq);
|
||||
ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
|
||||
sock_put(&tfile->sk);
|
||||
}
|
||||
}
|
||||
|
@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev)
|
|||
tun_napi_del(tun, tfile);
|
||||
/* Drop read queue */
|
||||
tun_queue_purge(tfile);
|
||||
xdp_rxq_info_unreg(&tfile->xdp_rxq);
|
||||
sock_put(&tfile->sk);
|
||||
tun_cleanup_tx_ring(tfile);
|
||||
}
|
||||
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
|
||||
tun_enable_queue(tfile);
|
||||
tun_queue_purge(tfile);
|
||||
xdp_rxq_info_unreg(&tfile->xdp_rxq);
|
||||
sock_put(&tfile->sk);
|
||||
tun_cleanup_tx_ring(tfile);
|
||||
}
|
||||
BUG_ON(tun->numdisabled != 0);
|
||||
|
||||
|
@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
|
|||
}
|
||||
|
||||
if (!tfile->detached &&
|
||||
ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
|
||||
ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
|
||||
GFP_KERNEL, tun_ptr_free)) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
@ -3219,6 +3213,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
|
|||
&tun_proto, 0);
|
||||
if (!tfile)
|
||||
return -ENOMEM;
|
||||
if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
|
||||
sk_free(&tfile->sk);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(tfile->tun, NULL);
|
||||
tfile->flags = 0;
|
||||
tfile->ifindex = 0;
|
||||
|
@ -3239,8 +3238,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
|
|||
|
||||
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
|
||||
|
||||
memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
|
|||
|
||||
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
|
||||
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
|
||||
/* Prevent any &gdesc->tcd field from being (speculatively)
|
||||
* read before (&gdesc->tcd)->gen is read.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
|
||||
&gdesc->tcd), tq, adapter->pdev,
|
||||
adapter);
|
||||
|
@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
|
|||
gdesc->txd.tci = skb_vlan_tag_get(skb);
|
||||
}
|
||||
|
||||
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
|
||||
* all other writes to &gdesc->txd.
|
||||
*/
|
||||
dma_wmb();
|
||||
|
||||
/* finally flips the GEN bit of the SOP desc. */
|
||||
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
|
||||
VMXNET3_TXD_GEN);
|
||||
|
@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
|
|||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
/* Prevent any rcd field from being (speculatively) read before
|
||||
* rcd->gen is read.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
|
||||
rcd->rqID != rq->dataRingQid);
|
||||
idx = rcd->rxdIdx;
|
||||
|
@ -1528,6 +1544,12 @@ rcd_done:
|
|||
ring->next2comp = idx;
|
||||
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
|
||||
ring = rq->rx_ring + ring_idx;
|
||||
|
||||
/* Ensure that the writes to rxd->gen bits will be observed
|
||||
* after all other writes to rxd objects.
|
||||
*/
|
||||
dma_wmb();
|
||||
|
||||
while (num_to_alloc) {
|
||||
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
|
||||
&rxCmdDesc);
|
||||
|
@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
|
|||
/* ==================== initialization and cleanup routines ============ */
|
||||
|
||||
static int
|
||||
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
|
||||
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
|
||||
{
|
||||
int err;
|
||||
unsigned long mmio_start, mmio_len;
|
||||
|
@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
|
|||
return err;
|
||||
}
|
||||
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
|
||||
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_consistent_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
*dma64 = true;
|
||||
} else {
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
*dma64 = false;
|
||||
}
|
||||
|
||||
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
|
||||
vmxnet3_driver_name);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to request region for adapter: error %d\n", err);
|
||||
goto err_set_mask;
|
||||
goto err_enable_device;
|
||||
}
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
@ -2751,7 +2755,7 @@ err_bar1:
|
|||
iounmap(adapter->hw_addr0);
|
||||
err_ioremap:
|
||||
pci_release_selected_regions(pdev, (1 << 2) - 1);
|
||||
err_set_mask:
|
||||
err_enable_device:
|
||||
pci_disable_device(pdev);
|
||||
return err;
|
||||
}
|
||||
|
@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
|||
#endif
|
||||
};
|
||||
int err;
|
||||
bool dma64 = false; /* stupid gcc */
|
||||
bool dma64;
|
||||
u32 ver;
|
||||
struct net_device *netdev;
|
||||
struct vmxnet3_adapter *adapter;
|
||||
|
@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
|||
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
|
||||
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
|
||||
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
|
||||
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_consistent_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
dma64 = true;
|
||||
} else {
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"pci_set_dma_mask failed\n");
|
||||
err = -EIO;
|
||||
goto err_set_mask;
|
||||
}
|
||||
dma64 = false;
|
||||
}
|
||||
|
||||
spin_lock_init(&adapter->cmd_lock);
|
||||
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
|
||||
sizeof(struct vmxnet3_adapter),
|
||||
|
@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
|||
if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
|
||||
dev_err(&pdev->dev, "Failed to map dma\n");
|
||||
err = -EFAULT;
|
||||
goto err_dma_map;
|
||||
goto err_set_mask;
|
||||
}
|
||||
adapter->shared = dma_alloc_coherent(
|
||||
&adapter->pdev->dev,
|
||||
|
@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
|
|||
}
|
||||
#endif /* VMXNET3_RSS */
|
||||
|
||||
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
|
||||
err = vmxnet3_alloc_pci_resources(adapter);
|
||||
if (err < 0)
|
||||
goto err_alloc_pci;
|
||||
|
||||
|
@ -3504,7 +3526,7 @@ err_alloc_queue_desc:
|
|||
err_alloc_shared:
|
||||
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
|
||||
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
|
||||
err_dma_map:
|
||||
err_set_mask:
|
||||
free_netdev(netdev);
|
||||
return err;
|
||||
}
|
||||
|
|
|
@@ -69,10 +69,12 @@
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"

/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
#define VMXNET3_DRIVER_VERSION_NUM 0x01041000

#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -1286,17 +1286,7 @@ enum {
static inline const struct cpumask *
mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
{
struct irq_desc *desc;
unsigned int irq;
int eqn;
int err;

err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
if (err)
return NULL;

desc = irq_to_desc(irq);
return desc->affinity_hint;
return dev->priv.irq_info[vector].mask;
}

#endif /* MLX5_DRIVER_H */
@@ -170,6 +170,7 @@ struct nft_data_desc {
int nft_data_init(const struct nft_ctx *ctx,
struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
void nft_data_release(const struct nft_data *data, enum nft_data_types type);
int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
enum nft_data_types type, unsigned int len);
@@ -736,6 +737,10 @@ struct nft_expr_ops {
int (*init)(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[]);
void (*activate)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
void (*deactivate)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
void (*destroy)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
int (*dump)(struct sk_buff *skb,
@@ -97,6 +97,9 @@ struct tls_sw_context {
u8 control;
bool decrypted;

char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];

/* Sending context */
char aad_space[TLS_AAD_SPACE_SIZE];

@@ -46,6 +46,9 @@ enum tcp_conntrack {
/* Marks possibility for expected RFC5961 challenge ACK */
#define IP_CT_EXP_CHALLENGE_ACK 0x40

/* Simultaneous open initialized */
#define IP_CT_TCP_SIMULTANEOUS_OPEN 0x80

struct nf_ct_tcp_flags {
__u8 flags;
__u8 mask;
@ -218,47 +218,84 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
|
||||
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
|
||||
u32 curr, const bool probe_pass)
|
||||
{
|
||||
const s64 imm_min = S32_MIN, imm_max = S32_MAX;
|
||||
s64 imm = insn->imm;
|
||||
|
||||
if (curr < pos && curr + imm + 1 > pos)
|
||||
imm += delta;
|
||||
else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
|
||||
imm -= delta;
|
||||
if (imm < imm_min || imm > imm_max)
|
||||
return -ERANGE;
|
||||
if (!probe_pass)
|
||||
insn->imm = imm;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
|
||||
u32 curr, const bool probe_pass)
|
||||
{
|
||||
const s32 off_min = S16_MIN, off_max = S16_MAX;
|
||||
s32 off = insn->off;
|
||||
|
||||
if (curr < pos && curr + off + 1 > pos)
|
||||
off += delta;
|
||||
else if (curr > pos + delta && curr + off + 1 <= pos + delta)
|
||||
off -= delta;
|
||||
if (off < off_min || off > off_max)
|
||||
return -ERANGE;
|
||||
if (!probe_pass)
|
||||
insn->off = off;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
|
||||
const bool probe_pass)
|
||||
{
|
||||
u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
|
||||
struct bpf_insn *insn = prog->insnsi;
|
||||
u32 i, insn_cnt = prog->len;
|
||||
bool pseudo_call;
|
||||
u8 code;
|
||||
int off;
|
||||
int ret = 0;
|
||||
|
||||
for (i = 0; i < insn_cnt; i++, insn++) {
|
||||
code = insn->code;
|
||||
if (BPF_CLASS(code) != BPF_JMP)
|
||||
continue;
|
||||
if (BPF_OP(code) == BPF_EXIT)
|
||||
continue;
|
||||
if (BPF_OP(code) == BPF_CALL) {
|
||||
if (insn->src_reg == BPF_PSEUDO_CALL)
|
||||
pseudo_call = true;
|
||||
else
|
||||
continue;
|
||||
} else {
|
||||
pseudo_call = false;
|
||||
u8 code;
|
||||
|
||||
/* In the probing pass we still operate on the original,
|
||||
* unpatched image in order to check overflows before we
|
||||
* do any other adjustments. Therefore skip the patchlet.
|
||||
*/
|
||||
if (probe_pass && i == pos) {
|
||||
i += delta + 1;
|
||||
insn++;
|
||||
}
|
||||
off = pseudo_call ? insn->imm : insn->off;
|
||||
|
||||
/* Adjust offset of jmps if we cross boundaries. */
|
||||
if (i < pos && i + off + 1 > pos)
|
||||
off += delta;
|
||||
else if (i > pos + delta && i + off + 1 <= pos + delta)
|
||||
off -= delta;
|
||||
|
||||
if (pseudo_call)
|
||||
insn->imm = off;
|
||||
else
|
||||
insn->off = off;
|
||||
code = insn->code;
|
||||
if (BPF_CLASS(code) != BPF_JMP ||
|
||||
BPF_OP(code) == BPF_EXIT)
|
||||
continue;
|
||||
/* Adjust offset of jmps if we cross patch boundaries. */
|
||||
if (BPF_OP(code) == BPF_CALL) {
|
||||
if (insn->src_reg != BPF_PSEUDO_CALL)
|
||||
continue;
|
||||
ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
|
||||
probe_pass);
|
||||
} else {
|
||||
ret = bpf_adj_delta_to_off(insn, pos, delta, i,
|
||||
probe_pass);
|
||||
}
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
||||
const struct bpf_insn *patch, u32 len)
|
||||
{
|
||||
u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
|
||||
const u32 cnt_max = S16_MAX;
|
||||
struct bpf_prog *prog_adj;
|
||||
|
||||
/* Since our patchlet doesn't expand the image, we're done. */
|
||||
|
@ -269,6 +306,15 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
|||
|
||||
insn_adj_cnt = prog->len + insn_delta;
|
||||
|
||||
/* Reject anything that would potentially let the insn->off
|
||||
* target overflow when we have excessive program expansions.
|
||||
* We need to probe here before we do any reallocation where
|
||||
* we afterwards may not fail anymore.
|
||||
*/
|
||||
if (insn_adj_cnt > cnt_max &&
|
||||
bpf_adj_branches(prog, off, insn_delta, true))
|
||||
return NULL;
|
||||
|
||||
/* Several new instructions need to be inserted. Make room
|
||||
* for them. Likely, there's no need for a new allocation as
|
||||
* last page could have large enough tailroom.
|
||||
|
@ -294,7 +340,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
|||
sizeof(*patch) * insn_rest);
|
||||
memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
|
||||
|
||||
bpf_adj_branches(prog_adj, off, insn_delta);
|
||||
/* We are guaranteed to not fail at this point, otherwise
|
||||
* the ship has sailed to reverse to the original state. An
|
||||
* overflow cannot happen at this point.
|
||||
*/
|
||||
BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
|
||||
|
||||
return prog_adj;
|
||||
}
|
||||
|
|
|
@ -1703,11 +1703,11 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|||
* we increment the refcnt. If this is the case abort with an
|
||||
* error.
|
||||
*/
|
||||
verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
|
||||
verdict = bpf_prog_inc_not_zero(verdict);
|
||||
if (IS_ERR(verdict))
|
||||
return PTR_ERR(verdict);
|
||||
|
||||
parse = bpf_prog_inc_not_zero(stab->bpf_parse);
|
||||
parse = bpf_prog_inc_not_zero(parse);
|
||||
if (IS_ERR(parse)) {
|
||||
bpf_prog_put(verdict);
|
||||
return PTR_ERR(parse);
|
||||
|
@ -1715,12 +1715,12 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|||
}
|
||||
|
||||
if (tx_msg) {
|
||||
tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg);
|
||||
tx_msg = bpf_prog_inc_not_zero(tx_msg);
|
||||
if (IS_ERR(tx_msg)) {
|
||||
if (verdict)
|
||||
bpf_prog_put(verdict);
|
||||
if (parse)
|
||||
if (parse && verdict) {
|
||||
bpf_prog_put(parse);
|
||||
bpf_prog_put(verdict);
|
||||
}
|
||||
return PTR_ERR(tx_msg);
|
||||
}
|
||||
}
|
||||
|
@ -1805,10 +1805,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|||
out_free:
|
||||
smap_release_sock(psock, sock);
|
||||
out_progs:
|
||||
if (verdict)
|
||||
bpf_prog_put(verdict);
|
||||
if (parse)
|
||||
if (parse && verdict) {
|
||||
bpf_prog_put(parse);
|
||||
bpf_prog_put(verdict);
|
||||
}
|
||||
if (tx_msg)
|
||||
bpf_prog_put(tx_msg);
|
||||
write_unlock_bh(&sock->sk_callback_lock);
|
||||
|
|
|
@@ -161,8 +161,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
/* Make sure the match only receives stp frames */
if (!par->nft_compat &&
(!ether_addr_equal(e->destmac, eth_stp_addr) ||
!is_broadcast_ether_addr(e->destmsk) ||
!(e->bitmask & EBT_DESTMAC)))
!(e->bitmask & EBT_DESTMAC) ||
!is_broadcast_ether_addr(e->destmsk)))
return -EINVAL;

return 0;
@@ -2124,7 +2124,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
int i, j;

for (i = count, j = offset; i--; j++) {
if (!remove_xps_queue(dev_maps, cpu, j))
if (!remove_xps_queue(dev_maps, tci, j))
break;
}

@@ -481,11 +481,18 @@ do_pass:

#define BPF_EMIT_JMP \
do { \
const s32 off_min = S16_MIN, off_max = S16_MAX; \
s32 off; \
\
if (target >= len || target < 0) \
goto err; \
insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
/* Adjust pc relative offset for 2nd or 3rd insn. */ \
insn->off -= insn - tmp_insns; \
off -= insn - tmp_insns; \
/* Reject anything not fitting into insn->off. */ \
if (off < off_min || off > off_max) \
goto err; \
insn->off = off; \
} while (0)

case BPF_JMP | BPF_JA:
@@ -1606,7 +1606,7 @@ static void __sk_free(struct sock *sk)
if (likely(sk->sk_net_refcnt))
sock_inuse_add(sock_net(sk), -1);

if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
sock_diag_broadcast_destroy(sk);
else
sk_destruct(sk);
@@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
static int dsa_port_setup(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
int err;
int err = 0;

memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));

err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index);
if (dp->type != DSA_PORT_TYPE_UNUSED)
err = devlink_port_register(ds->devlink, &dp->devlink_port,
dp->index);
if (err)
return err;

@@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp)

static void dsa_port_teardown(struct dsa_port *dp)
{
devlink_port_unregister(&dp->devlink_port);
if (dp->type != DSA_PORT_TYPE_UNUSED)
devlink_port_unregister(&dp->devlink_port);

switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
@@ -326,10 +326,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
int rpf, struct in_device *idev, u32 *itag)
{
struct net *net = dev_net(dev);
struct flow_keys flkeys;
int ret, no_addr;
struct fib_result res;
struct flowi4 fl4;
struct net *net = dev_net(dev);
bool dev_match;

fl4.flowi4_oif = 0;
@@ -347,6 +348,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
no_addr = idev->ifa_list == NULL;

fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) {
fl4.flowi4_proto = 0;
fl4.fl4_sport = 0;
fl4.fl4_dport = 0;
}

trace_fib_validate_source(dev, &fl4);

@@ -722,10 +722,12 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));

@@ -1045,7 +1045,8 @@ alloc_new_skb:
		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;

@@ -34,6 +34,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
MODULE_ALIAS("ipt_icmp");

void *ipt_alloc_initial_table(const struct xt_table *info)
{

@@ -89,10 +89,10 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
		return true ^ invert;
	}

	memset(&flow, 0, sizeof(flow));
	flow.flowi4_iif = LOOPBACK_IFINDEX;
	flow.daddr = iph->saddr;
	flow.saddr = rpfilter_get_saddr(iph->daddr);
	flow.flowi4_oif = 0;
	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
	flow.flowi4_tos = RT_TOS(iph->tos);
	flow.flowi4_scope = RT_SCOPE_UNIVERSE;

@@ -1961,8 +1961,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys))
	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {

@@ -2833,8 +2833,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
		return -EBUSY;

	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

@@ -3342,6 +3344,7 @@ static void tcp_connect_init(struct sock *sk)
	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_wnd = 0;
	tcp_init_wl(tp, 0);
	tcp_write_queue_purge(sk);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->snd_up = tp->write_seq;

@ -71,6 +71,7 @@ struct ip6gre_net {
|
|||
struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
|
||||
|
||||
struct ip6_tnl __rcu *collect_md_tun;
|
||||
struct ip6_tnl __rcu *collect_md_tun_erspan;
|
||||
struct net_device *fb_tunnel_dev;
|
||||
};
|
||||
|
||||
|
@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev);
|
|||
static void ip6gre_tunnel_setup(struct net_device *dev);
|
||||
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
|
||||
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
|
||||
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
|
||||
|
||||
/* Tunnel hash table */
|
||||
|
||||
|
@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
|
|||
if (cand)
|
||||
return cand;
|
||||
|
||||
t = rcu_dereference(ign->collect_md_tun);
|
||||
if (gre_proto == htons(ETH_P_ERSPAN) ||
|
||||
gre_proto == htons(ETH_P_ERSPAN2))
|
||||
t = rcu_dereference(ign->collect_md_tun_erspan);
|
||||
else
|
||||
t = rcu_dereference(ign->collect_md_tun);
|
||||
|
||||
if (t && t->dev->flags & IFF_UP)
|
||||
return t;
|
||||
|
||||
|
@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
|
|||
return &ign->tunnels[prio][h];
|
||||
}
|
||||
|
||||
static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
|
||||
{
|
||||
if (t->parms.collect_md)
|
||||
rcu_assign_pointer(ign->collect_md_tun, t);
|
||||
}
|
||||
|
||||
static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
|
||||
{
|
||||
if (t->parms.collect_md)
|
||||
rcu_assign_pointer(ign->collect_md_tun_erspan, t);
|
||||
}
|
||||
|
||||
static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
|
||||
{
|
||||
if (t->parms.collect_md)
|
||||
rcu_assign_pointer(ign->collect_md_tun, NULL);
|
||||
}
|
||||
|
||||
static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
|
||||
struct ip6_tnl *t)
|
||||
{
|
||||
if (t->parms.collect_md)
|
||||
rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
|
||||
}
|
||||
|
||||
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
|
||||
const struct ip6_tnl *t)
|
||||
{
|
||||
|
@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
|
|||
{
|
||||
struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
|
||||
|
||||
if (t->parms.collect_md)
|
||||
rcu_assign_pointer(ign->collect_md_tun, t);
|
||||
|
||||
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
|
||||
rcu_assign_pointer(*tp, t);
|
||||
}
|
||||
|
@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
|
|||
struct ip6_tnl __rcu **tp;
|
||||
struct ip6_tnl *iter;
|
||||
|
||||
if (t->parms.collect_md)
|
||||
rcu_assign_pointer(ign->collect_md_tun, NULL);
|
||||
|
||||
for (tp = ip6gre_bucket(ign, t);
|
||||
(iter = rtnl_dereference(*tp)) != NULL;
|
||||
tp = &iter->next) {
|
||||
|
@ -374,11 +400,23 @@ failed_free:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void ip6erspan_tunnel_uninit(struct net_device *dev)
|
||||
{
|
||||
struct ip6_tnl *t = netdev_priv(dev);
|
||||
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
|
||||
|
||||
ip6erspan_tunnel_unlink_md(ign, t);
|
||||
ip6gre_tunnel_unlink(ign, t);
|
||||
dst_cache_reset(&t->dst_cache);
|
||||
dev_put(dev);
|
||||
}
|
||||
|
||||
static void ip6gre_tunnel_uninit(struct net_device *dev)
|
||||
{
|
||||
struct ip6_tnl *t = netdev_priv(dev);
|
||||
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
|
||||
|
||||
ip6gre_tunnel_unlink_md(ign, t);
|
||||
ip6gre_tunnel_unlink(ign, t);
|
||||
dst_cache_reset(&t->dst_cache);
|
||||
dev_put(dev);
|
||||
|
@ -698,6 +736,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
|
|||
else
|
||||
fl6->daddr = tunnel->parms.raddr;
|
||||
|
||||
if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Push GRE header. */
|
||||
protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
|
||||
|
||||
|
@ -908,7 +949,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|||
truncate = true;
|
||||
}
|
||||
|
||||
if (skb_cow_head(skb, dev->needed_headroom))
|
||||
if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
|
||||
goto tx_err;
|
||||
|
||||
t->parms.o_flags &= ~TUNNEL_KEY;
|
||||
|
@ -979,11 +1020,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|||
erspan_build_header(skb, ntohl(t->parms.o_key),
|
||||
t->parms.index,
|
||||
truncate, false);
|
||||
else
|
||||
else if (t->parms.erspan_ver == 2)
|
||||
erspan_build_header_v2(skb, ntohl(t->parms.o_key),
|
||||
t->parms.dir,
|
||||
t->parms.hwid,
|
||||
truncate, false);
|
||||
else
|
||||
goto tx_err;
|
||||
|
||||
fl6.daddr = t->parms.raddr;
|
||||
}
|
||||
|
||||
|
@ -1019,12 +1063,11 @@ tx_err:
|
|||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
||||
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
|
||||
{
|
||||
struct net_device *dev = t->dev;
|
||||
struct __ip6_tnl_parm *p = &t->parms;
|
||||
struct flowi6 *fl6 = &t->fl.u.ip6;
|
||||
int t_hlen;
|
||||
|
||||
if (dev->type != ARPHRD_ETHER) {
|
||||
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
|
||||
|
@ -1051,12 +1094,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
|||
dev->flags |= IFF_POINTOPOINT;
|
||||
else
|
||||
dev->flags &= ~IFF_POINTOPOINT;
|
||||
}
|
||||
|
||||
t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
|
||||
|
||||
t->hlen = t->encap_hlen + t->tun_hlen;
|
||||
|
||||
t_hlen = t->hlen + sizeof(struct ipv6hdr);
|
||||
static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
|
||||
int t_hlen)
|
||||
{
|
||||
const struct __ip6_tnl_parm *p = &t->parms;
|
||||
struct net_device *dev = t->dev;
|
||||
|
||||
if (p->flags & IP6_TNL_F_CAP_XMIT) {
|
||||
int strict = (ipv6_addr_type(&p->raddr) &
|
||||
|
@ -1088,8 +1132,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
|||
}
|
||||
}
|
||||
|
||||
static int ip6gre_tnl_change(struct ip6_tnl *t,
|
||||
const struct __ip6_tnl_parm *p, int set_mtu)
|
||||
static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
|
||||
{
|
||||
int t_hlen;
|
||||
|
||||
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
|
||||
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
|
||||
|
||||
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
|
||||
tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
|
||||
return t_hlen;
|
||||
}
|
||||
|
||||
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
||||
{
|
||||
ip6gre_tnl_link_config_common(t);
|
||||
ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
|
||||
}
|
||||
|
||||
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
|
||||
const struct __ip6_tnl_parm *p)
|
||||
{
|
||||
t->parms.laddr = p->laddr;
|
||||
t->parms.raddr = p->raddr;
|
||||
|
@ -1105,6 +1167,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
|
|||
t->parms.o_flags = p->o_flags;
|
||||
t->parms.fwmark = p->fwmark;
|
||||
dst_cache_reset(&t->dst_cache);
|
||||
}
|
||||
|
||||
static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
|
||||
int set_mtu)
|
||||
{
|
||||
ip6gre_tnl_copy_tnl_parm(t, p);
|
||||
ip6gre_tnl_link_config(t, set_mtu);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1381,11 +1449,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
|
||||
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
|
||||
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
|
||||
|
||||
dev->hard_header_len = LL_MAX_HEADER + t_hlen;
|
||||
t_hlen = ip6gre_calc_hlen(tunnel);
|
||||
dev->mtu = ETH_DATA_LEN - t_hlen;
|
||||
if (dev->type == ARPHRD_ETHER)
|
||||
dev->mtu -= ETH_HLEN;
|
||||
|
@ -1728,6 +1792,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
|
|||
.ndo_get_iflink = ip6_tnl_get_iflink,
|
||||
};
|
||||
|
||||
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
|
||||
{
|
||||
int t_hlen;
|
||||
|
||||
tunnel->tun_hlen = 8;
|
||||
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
|
||||
erspan_hdr_len(tunnel->parms.erspan_ver);
|
||||
|
||||
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
|
||||
tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
|
||||
return t_hlen;
|
||||
}
|
||||
|
||||
static int ip6erspan_tap_init(struct net_device *dev)
|
||||
{
|
||||
struct ip6_tnl *tunnel;
|
||||
|
@ -1751,12 +1828,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
tunnel->tun_hlen = 8;
|
||||
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
|
||||
erspan_hdr_len(tunnel->parms.erspan_ver);
|
||||
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
|
||||
|
||||
dev->hard_header_len = LL_MAX_HEADER + t_hlen;
|
||||
t_hlen = ip6erspan_calc_hlen(tunnel);
|
||||
dev->mtu = ETH_DATA_LEN - t_hlen;
|
||||
if (dev->type == ARPHRD_ETHER)
|
||||
dev->mtu -= ETH_HLEN;
|
||||
|
@ -1764,14 +1836,14 @@ static int ip6erspan_tap_init(struct net_device *dev)
|
|||
dev->mtu -= 8;
|
||||
|
||||
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
|
||||
ip6gre_tnl_link_config(tunnel, 1);
|
||||
ip6erspan_tnl_link_config(tunnel, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct net_device_ops ip6erspan_netdev_ops = {
|
||||
.ndo_init = ip6erspan_tap_init,
|
||||
.ndo_uninit = ip6gre_tunnel_uninit,
|
||||
.ndo_uninit = ip6erspan_tunnel_uninit,
|
||||
.ndo_start_xmit = ip6erspan_tunnel_xmit,
|
||||
.ndo_set_mac_address = eth_mac_addr,
|
||||
.ndo_validate_addr = eth_validate_addr,
|
||||
|
@ -1835,13 +1907,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
|
||||
struct nlattr *tb[], struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
|
||||
struct nlattr *tb[], struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6_tnl *nt;
|
||||
struct net *net = dev_net(dev);
|
||||
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
|
||||
struct ip_tunnel_encap ipencap;
|
||||
int err;
|
||||
|
||||
|
@ -1854,16 +1924,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
|
|||
return err;
|
||||
}
|
||||
|
||||
ip6gre_netlink_parms(data, &nt->parms);
|
||||
|
||||
if (nt->parms.collect_md) {
|
||||
if (rtnl_dereference(ign->collect_md_tun))
|
||||
return -EEXIST;
|
||||
} else {
|
||||
if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
|
||||
eth_hw_addr_random(dev);
|
||||
|
||||
|
@ -1874,51 +1934,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
|
|||
if (err)
|
||||
goto out;
|
||||
|
||||
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
|
||||
|
||||
if (tb[IFLA_MTU])
|
||||
ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
|
||||
|
||||
dev_hold(dev);
|
||||
ip6gre_tunnel_link(ign, nt);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
|
||||
struct nlattr *tb[], struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6_tnl *nt = netdev_priv(dev);
|
||||
struct net *net = dev_net(dev);
|
||||
struct ip6gre_net *ign;
|
||||
int err;
|
||||
|
||||
ip6gre_netlink_parms(data, &nt->parms);
|
||||
ign = net_generic(net, ip6gre_net_id);
|
||||
|
||||
if (nt->parms.collect_md) {
|
||||
if (rtnl_dereference(ign->collect_md_tun))
|
||||
return -EEXIST;
|
||||
} else {
|
||||
if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
|
||||
if (!err) {
|
||||
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
|
||||
ip6gre_tunnel_link_md(ign, nt);
|
||||
ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct ip6_tnl *
|
||||
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
|
||||
struct nlattr *data[], struct __ip6_tnl_parm *p_p,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6_tnl *t, *nt = netdev_priv(dev);
|
||||
struct net *net = nt->net;
|
||||
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
|
||||
struct ip_tunnel_encap ipencap;
|
||||
|
||||
if (dev == ign->fb_tunnel_dev)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (ip6gre_netlink_encap_parms(data, &ipencap)) {
|
||||
int err = ip6_tnl_encap_setup(nt, &ipencap);
|
||||
|
||||
if (err < 0)
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
ip6gre_netlink_parms(data, p_p);
|
||||
|
||||
t = ip6gre_tunnel_locate(net, p_p, 0);
|
||||
|
||||
if (t) {
|
||||
if (t->dev != dev)
|
||||
return ERR_PTR(-EEXIST);
|
||||
} else {
|
||||
t = nt;
|
||||
}
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
|
||||
struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6_tnl *t, *nt = netdev_priv(dev);
|
||||
struct net *net = nt->net;
|
||||
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
|
||||
struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
|
||||
struct __ip6_tnl_parm p;
|
||||
struct ip_tunnel_encap ipencap;
|
||||
struct ip6_tnl *t;
|
||||
|
||||
if (dev == ign->fb_tunnel_dev)
|
||||
return -EINVAL;
|
||||
|
||||
if (ip6gre_netlink_encap_parms(data, &ipencap)) {
|
||||
int err = ip6_tnl_encap_setup(nt, &ipencap);
|
||||
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
ip6gre_netlink_parms(data, &p);
|
||||
|
||||
t = ip6gre_tunnel_locate(net, &p, 0);
|
||||
|
||||
if (t) {
|
||||
if (t->dev != dev)
|
||||
return -EEXIST;
|
||||
} else {
|
||||
t = nt;
|
||||
}
|
||||
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
|
||||
if (IS_ERR(t))
|
||||
return PTR_ERR(t);
|
||||
|
||||
ip6gre_tunnel_unlink_md(ign, t);
|
||||
ip6gre_tunnel_unlink(ign, t);
|
||||
ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
|
||||
ip6gre_tunnel_link_md(ign, t);
|
||||
ip6gre_tunnel_link(ign, t);
|
||||
return 0;
|
||||
}
|
||||
|
@ -2068,6 +2171,69 @@ static void ip6erspan_tap_setup(struct net_device *dev)
|
|||
netif_keep_dst(dev);
|
||||
}
|
||||
|
||||
static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
|
||||
struct nlattr *tb[], struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6_tnl *nt = netdev_priv(dev);
|
||||
struct net *net = dev_net(dev);
|
||||
struct ip6gre_net *ign;
|
||||
int err;
|
||||
|
||||
ip6gre_netlink_parms(data, &nt->parms);
|
||||
ign = net_generic(net, ip6gre_net_id);
|
||||
|
||||
if (nt->parms.collect_md) {
|
||||
if (rtnl_dereference(ign->collect_md_tun_erspan))
|
||||
return -EEXIST;
|
||||
} else {
|
||||
if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
|
||||
if (!err) {
|
||||
ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
|
||||
ip6erspan_tunnel_link_md(ign, nt);
|
||||
ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
||||
{
|
||||
ip6gre_tnl_link_config_common(t);
|
||||
ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
|
||||
}
|
||||
|
||||
static int ip6erspan_tnl_change(struct ip6_tnl *t,
|
||||
const struct __ip6_tnl_parm *p, int set_mtu)
|
||||
{
|
||||
ip6gre_tnl_copy_tnl_parm(t, p);
|
||||
ip6erspan_tnl_link_config(t, set_mtu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
|
||||
struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
|
||||
struct __ip6_tnl_parm p;
|
||||
struct ip6_tnl *t;
|
||||
|
||||
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
|
||||
if (IS_ERR(t))
|
||||
return PTR_ERR(t);
|
||||
|
||||
ip6gre_tunnel_unlink_md(ign, t);
|
||||
ip6gre_tunnel_unlink(ign, t);
|
||||
ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
|
||||
ip6erspan_tunnel_link_md(ign, t);
|
||||
ip6gre_tunnel_link(ign, t);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
|
||||
.kind = "ip6gre",
|
||||
.maxtype = IFLA_GRE_MAX,
|
||||
|
@@ -2104,8 +2270,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6erspan_tap_setup,
	.validate = ip6erspan_tap_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.newlink = ip6erspan_newlink,
	.changelink = ip6erspan_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,

@@ -1503,7 +1503,8 @@ alloc_new_skb:
		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;

@@ -38,6 +38,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");
MODULE_ALIAS("ip6t_icmp6");

void *ip6t_alloc_initial_table(const struct xt_table *info)
{

@@ -585,7 +585,8 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max)
static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

@@ -232,7 +232,10 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
{
	unsigned int hash;
	bool ret;
	bool ret = false;

	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
		return refcount_dec_if_one(&cp->refcnt);

	hash = ip_vs_conn_hashkey_conn(cp);

@@ -240,15 +243,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
	spin_lock(&cp->lock);

	if (cp->flags & IP_VS_CONN_F_HASHED) {
		ret = false;
		/* Decrease refcnt and unlink conn only if we are last user */
		if (refcount_dec_if_one(&cp->refcnt)) {
			hlist_del_rcu(&cp->c_list);
			cp->flags &= ~IP_VS_CONN_F_HASHED;
			ret = true;
		}
	} else
		ret = refcount_read(&cp->refcnt) ? false : true;
	}

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

@@ -454,12 +455,6 @@ ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);

static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp)
{
	__ip_vs_conn_put(cp);
	ip_vs_conn_expire(&cp->timer);
}

/*
 * Put back the conn and restart its timer with its timeout
 */

@@ -478,7 +473,7 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
	    (refcount_read(&cp->refcnt) == 1) &&
	    !timer_pending(&cp->timer))
		/* expire connection immediately */
		__ip_vs_conn_put_notimer(cp);
		ip_vs_conn_expire(&cp->timer);
	else
		__ip_vs_conn_put_timer(cp);
}

@ -119,6 +119,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
|
|||
struct ip_vs_cpu_stats *s;
|
||||
struct ip_vs_service *svc;
|
||||
|
||||
local_bh_disable();
|
||||
|
||||
s = this_cpu_ptr(dest->stats.cpustats);
|
||||
u64_stats_update_begin(&s->syncp);
|
||||
s->cnt.inpkts++;
|
||||
|
@ -137,6 +139,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
|
|||
s->cnt.inpkts++;
|
||||
s->cnt.inbytes += skb->len;
|
||||
u64_stats_update_end(&s->syncp);
|
||||
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -151,6 +155,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
|
|||
struct ip_vs_cpu_stats *s;
|
||||
struct ip_vs_service *svc;
|
||||
|
||||
local_bh_disable();
|
||||
|
||||
s = this_cpu_ptr(dest->stats.cpustats);
|
||||
u64_stats_update_begin(&s->syncp);
|
||||
s->cnt.outpkts++;
|
||||
|
@ -169,6 +175,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
|
|||
s->cnt.outpkts++;
|
||||
s->cnt.outbytes += skb->len;
|
||||
u64_stats_update_end(&s->syncp);
|
||||
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -179,6 +187,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
|
|||
struct netns_ipvs *ipvs = svc->ipvs;
|
||||
struct ip_vs_cpu_stats *s;
|
||||
|
||||
local_bh_disable();
|
||||
|
||||
s = this_cpu_ptr(cp->dest->stats.cpustats);
|
||||
u64_stats_update_begin(&s->syncp);
|
||||
s->cnt.conns++;
|
||||
|
@ -193,6 +203,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
|
|||
u64_stats_update_begin(&s->syncp);
|
||||
s->cnt.conns++;
|
||||
u64_stats_update_end(&s->syncp);
|
||||
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -981,6 +981,17 @@ static int tcp_packet(struct nf_conn *ct,
|
|||
return NF_ACCEPT; /* Don't change state */
|
||||
}
|
||||
break;
|
||||
case TCP_CONNTRACK_SYN_SENT2:
|
||||
/* tcp_conntracks table is not smart enough to handle
|
||||
* simultaneous open.
|
||||
*/
|
||||
ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
|
||||
break;
|
||||
case TCP_CONNTRACK_SYN_RECV:
|
||||
if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
|
||||
ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
|
||||
new_state = TCP_CONNTRACK_ESTABLISHED;
|
||||
break;
|
||||
case TCP_CONNTRACK_CLOSE:
|
||||
if (index == TCP_RST_SET
|
||||
&& (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
|
||||
|
|
|
@ -214,6 +214,34 @@ static int nft_delchain(struct nft_ctx *ctx)
|
|||
return err;
|
||||
}
|
||||
|
||||
static void nft_rule_expr_activate(const struct nft_ctx *ctx,
|
||||
struct nft_rule *rule)
|
||||
{
|
||||
struct nft_expr *expr;
|
||||
|
||||
expr = nft_expr_first(rule);
|
||||
while (expr != nft_expr_last(rule) && expr->ops) {
|
||||
if (expr->ops->activate)
|
||||
expr->ops->activate(ctx, expr);
|
||||
|
||||
expr = nft_expr_next(expr);
|
||||
}
|
||||
}
|
||||
|
||||
static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
|
||||
struct nft_rule *rule)
|
||||
{
|
||||
struct nft_expr *expr;
|
||||
|
||||
expr = nft_expr_first(rule);
|
||||
while (expr != nft_expr_last(rule) && expr->ops) {
|
||||
if (expr->ops->deactivate)
|
||||
expr->ops->deactivate(ctx, expr);
|
||||
|
||||
expr = nft_expr_next(expr);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
|
||||
{
|
||||
|
@ -259,6 +287,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
|
|||
nft_trans_destroy(trans);
|
||||
return err;
|
||||
}
|
||||
nft_rule_expr_deactivate(ctx, rule);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2238,6 +2267,13 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
|
|||
kfree(rule);
|
||||
}
|
||||
|
||||
static void nf_tables_rule_release(const struct nft_ctx *ctx,
|
||||
struct nft_rule *rule)
|
||||
{
|
||||
nft_rule_expr_deactivate(ctx, rule);
|
||||
nf_tables_rule_destroy(ctx, rule);
|
||||
}
|
||||
|
||||
#define NFT_RULE_MAXEXPRS 128
|
||||
|
||||
static struct nft_expr_info *info;
|
||||
|
@ -2402,7 +2438,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
|
|||
return 0;
|
||||
|
||||
err2:
|
||||
nf_tables_rule_destroy(&ctx, rule);
|
||||
nf_tables_rule_release(&ctx, rule);
|
||||
err1:
|
||||
for (i = 0; i < n; i++) {
|
||||
if (info[i].ops != NULL)
|
||||
|
@ -4044,8 +4080,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
|
|||
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
|
||||
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
|
||||
nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
|
||||
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
|
||||
return -EBUSY;
|
||||
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
|
||||
err = -EBUSY;
|
||||
goto err5;
|
||||
}
|
||||
if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
|
||||
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
|
||||
memcmp(nft_set_ext_data(ext),
|
||||
|
@ -4130,7 +4168,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
|
|||
* NFT_GOTO verdicts. This function must be called on active data objects
|
||||
* from the second phase of the commit protocol.
|
||||
*/
|
||||
static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
|
||||
void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
|
||||
{
|
||||
if (type == NFT_DATA_VERDICT) {
|
||||
switch (data->verdict.code) {
|
||||
|
@ -5761,7 +5799,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
|
|||
}
|
||||
}
|
||||
|
||||
static void nf_tables_commit_release(struct nft_trans *trans)
|
||||
static void nft_commit_release(struct nft_trans *trans)
|
||||
{
|
||||
switch (trans->msg_type) {
|
||||
case NFT_MSG_DELTABLE:
|
||||
|
@ -5790,6 +5828,21 @@ static void nf_tables_commit_release(struct nft_trans *trans)
|
|||
kfree(trans);
|
||||
}
|
||||
|
||||
static void nf_tables_commit_release(struct net *net)
|
||||
{
|
||||
struct nft_trans *trans, *next;
|
||||
|
||||
if (list_empty(&net->nft.commit_list))
|
||||
return;
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
|
||||
list_del(&trans->list);
|
||||
nft_commit_release(trans);
|
||||
}
|
||||
}
|
||||
|
||||
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
||||
{
|
||||
struct nft_trans *trans, *next;
|
||||
|
@ -5920,13 +5973,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
|||
}
|
||||
}
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
|
||||
list_del(&trans->list);
|
||||
nf_tables_commit_release(trans);
|
||||
}
|
||||
|
||||
nf_tables_commit_release(net);
|
||||
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
|
||||
|
||||
return 0;
|
||||
|
@ -6006,10 +6053,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
|
|||
case NFT_MSG_NEWRULE:
|
||||
trans->ctx.chain->use--;
|
||||
list_del_rcu(&nft_trans_rule(trans)->list);
|
||||
nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
|
||||
break;
|
||||
case NFT_MSG_DELRULE:
|
||||
trans->ctx.chain->use++;
|
||||
nft_clear(trans->ctx.net, nft_trans_rule(trans));
|
||||
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
|
||||
nft_trans_destroy(trans);
|
||||
break;
|
||||
case NFT_MSG_NEWSET:
|
||||
|
@ -6585,7 +6634,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
|
|||
list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
|
||||
list_del(&rule->list);
|
||||
ctx->chain->use--;
|
||||
nf_tables_rule_destroy(ctx, rule);
|
||||
nf_tables_rule_release(ctx, rule);
|
||||
}
|
||||
list_del(&ctx->chain->list);
|
||||
ctx->table->use--;
|
||||
|
@ -6623,7 +6672,7 @@ static void __nft_release_tables(struct net *net)
|
|||
list_for_each_entry_safe(rule, nr, &chain->rules, list) {
|
||||
list_del(&rule->list);
|
||||
chain->use--;
|
||||
nf_tables_rule_destroy(&ctx, rule);
|
||||
nf_tables_rule_release(&ctx, rule);
|
||||
}
|
||||
}
|
||||
list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
|
||||
|
|
|
@ -119,15 +119,22 @@ DEFINE_STATIC_KEY_FALSE(nft_counters_enabled);
|
|||
static noinline void nft_update_chain_stats(const struct nft_chain *chain,
|
||||
const struct nft_pktinfo *pkt)
|
||||
{
|
||||
struct nft_base_chain *base_chain;
|
||||
struct nft_stats *stats;
|
||||
|
||||
local_bh_disable();
|
||||
stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats));
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->pkts++;
|
||||
stats->bytes += pkt->skb->len;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
local_bh_enable();
|
||||
base_chain = nft_base_chain(chain);
|
||||
if (!base_chain->stats)
|
||||
return;
|
||||
|
||||
stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
|
||||
if (stats) {
|
||||
local_bh_disable();
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->pkts++;
|
||||
stats->bytes += pkt->skb->len;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
||||
struct nft_jumpstack {
|
||||
|
|
|
@@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
		nfacct->flags = flags;
	}

	strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
	nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);

	if (tb[NFACCT_BYTES]) {
		atomic64_set(&nfacct->bytes,

@ -149,8 +149,8 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
|
|||
!tb[NFCTH_POLICY_EXPECT_TIMEOUT])
|
||||
return -EINVAL;
|
||||
|
||||
strncpy(expect_policy->name,
|
||||
nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
|
||||
nla_strlcpy(expect_policy->name,
|
||||
nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
|
||||
expect_policy->max_expected =
|
||||
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
|
||||
if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
|
||||
|
@ -234,7 +234,8 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
|
|||
if (ret < 0)
|
||||
goto err1;
|
||||
|
||||
strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
|
||||
nla_strlcpy(helper->name,
|
||||
nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
|
||||
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
|
||||
if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
|
||||
ret = -ENOMEM;
|
||||
|
|
|
@ -27,14 +27,31 @@ struct nft_xt {
|
|||
struct list_head head;
|
||||
struct nft_expr_ops ops;
|
||||
unsigned int refcnt;
|
||||
|
||||
/* Unlike other expressions, ops doesn't have static storage duration.
|
||||
* nft core assumes they do. We use kfree_rcu so that nft core can
|
||||
* can check expr->ops->size even after nft_compat->destroy() frees
|
||||
* the nft_xt struct that holds the ops structure.
|
||||
*/
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
static void nft_xt_put(struct nft_xt *xt)
|
||||
/* Used for matches where *info is larger than X byte */
|
||||
#define NFT_MATCH_LARGE_THRESH 192
|
||||
|
||||
struct nft_xt_match_priv {
|
||||
void *info;
|
||||
};
|
||||
|
||||
static bool nft_xt_put(struct nft_xt *xt)
|
||||
{
|
||||
if (--xt->refcnt == 0) {
|
||||
list_del(&xt->head);
|
||||
kfree(xt);
|
||||
kfree_rcu(xt, rcu_head);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int nft_compat_chain_validate_dependency(const char *tablename,
|
||||
|
@ -226,6 +243,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
|||
struct xt_target *target = expr->ops->data;
|
||||
struct xt_tgchk_param par;
|
||||
size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
|
||||
struct nft_xt *nft_xt;
|
||||
u16 proto = 0;
|
||||
bool inv = false;
|
||||
union nft_entry e = {};
|
||||
|
@ -236,25 +254,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
|||
if (ctx->nla[NFTA_RULE_COMPAT]) {
|
||||
ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
return ret;
|
||||
}
|
||||
|
||||
nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
|
||||
|
||||
ret = xt_check_target(&par, size, proto, inv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
return ret;
|
||||
|
||||
/* The standard target cannot be used */
|
||||
if (target->target == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
if (!target->target)
|
||||
return -EINVAL;
|
||||
|
||||
nft_xt = container_of(expr->ops, struct nft_xt, ops);
|
||||
nft_xt->refcnt++;
|
||||
return 0;
|
||||
err:
|
||||
module_put(target->me);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -271,8 +286,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
|||
if (par.target->destroy != NULL)
|
||||
par.target->destroy(&par);
|
||||
|
||||
nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
|
||||
module_put(target->me);
|
||||
if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
|
||||
module_put(target->me);
|
||||
}
|
||||
|
||||
static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||
|
@ -316,11 +331,11 @@ static int nft_target_validate(const struct nft_ctx *ctx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nft_match_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs,
|
||||
const struct nft_pktinfo *pkt)
|
||||
static void __nft_match_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs,
|
||||
const struct nft_pktinfo *pkt,
|
||||
void *info)
|
||||
{
|
||||
void *info = nft_expr_priv(expr);
|
||||
struct xt_match *match = expr->ops->data;
|
||||
struct sk_buff *skb = pkt->skb;
|
||||
bool ret;
|
||||
|
@ -344,6 +359,22 @@ static void nft_match_eval(const struct nft_expr *expr,
|
|||
}
|
||||
}
|
||||
|
||||
static void nft_match_large_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs,
|
||||
const struct nft_pktinfo *pkt)
|
||||
{
|
||||
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
|
||||
|
||||
__nft_match_eval(expr, regs, pkt, priv->info);
|
||||
}
|
||||
|
||||
static void nft_match_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs,
|
||||
const struct nft_pktinfo *pkt)
|
||||
{
|
||||
__nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
|
||||
}
|
||||
|
||||
static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
|
||||
[NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
|
||||
[NFTA_MATCH_REV] = { .type = NLA_U32 },
|
||||
|
@ -404,13 +435,14 @@ static void match_compat_from_user(struct xt_match *m, void *in, void *out)
|
|||
}
|
||||
|
||||
static int
|
||||
nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
||||
const struct nlattr * const tb[])
|
||||
__nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
||||
const struct nlattr * const tb[],
|
||||
void *info)
|
||||
{
|
||||
void *info = nft_expr_priv(expr);
|
||||
struct xt_match *match = expr->ops->data;
|
||||
struct xt_mtchk_param par;
|
||||
size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
|
||||
struct nft_xt *nft_xt;
|
||||
u16 proto = 0;
|
||||
bool inv = false;
|
||||
union nft_entry e = {};
|
||||
|
@ -421,26 +453,50 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
|||
if (ctx->nla[NFTA_RULE_COMPAT]) {
|
||||
ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
return ret;
|
||||
}
|
||||
|
||||
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
|
||||
|
||||
ret = xt_check_match(&par, size, proto, inv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
return ret;
|
||||
|
||||
nft_xt = container_of(expr->ops, struct nft_xt, ops);
|
||||
nft_xt->refcnt++;
|
||||
return 0;
|
||||
err:
|
||||
module_put(match->me);
|
||||
}
|
||||
|
||||
static int
|
||||
nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
||||
const struct nlattr * const tb[])
|
||||
{
|
||||
return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
|
||||
}
|
||||
|
||||
static int
|
||||
nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
||||
const struct nlattr * const tb[])
|
||||
{
|
||||
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
|
||||
struct xt_match *m = expr->ops->data;
|
||||
int ret;
|
||||
|
||||
priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
|
||||
if (!priv->info)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = __nft_match_init(ctx, expr, tb, priv->info);
|
||||
if (ret)
|
||||
kfree(priv->info);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
||||
__nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
|
||||
void *info)
|
||||
{
|
||||
struct xt_match *match = expr->ops->data;
|
||||
void *info = nft_expr_priv(expr);
|
||||
struct xt_mtdtor_param par;
|
||||
|
||||
par.net = ctx->net;
|
||||
|
@ -450,13 +506,28 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
|||
if (par.match->destroy != NULL)
|
||||
par.match->destroy(&par);
|
||||
|
||||
nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
|
||||
module_put(match->me);
|
||||
if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
|
||||
module_put(match->me);
|
||||
}
|
||||
|
||||
static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||
static void
|
||||
nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
||||
{
|
||||
__nft_match_destroy(ctx, expr, nft_expr_priv(expr));
|
||||
}
|
||||
|
||||
static void
|
||||
nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
||||
{
|
||||
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
|
||||
|
||||
__nft_match_destroy(ctx, expr, priv->info);
|
||||
kfree(priv->info);
|
||||
}
|
||||
|
||||
static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
|
||||
void *info)
|
||||
{
|
||||
void *info = nft_expr_priv(expr);
|
||||
struct xt_match *match = expr->ops->data;
|
||||
|
||||
if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
|
||||
|
@ -470,6 +541,18 @@ nla_put_failure:
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||
{
|
||||
return __nft_match_dump(skb, expr, nft_expr_priv(expr));
|
||||
}
|
||||
|
||||
static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
|
||||
{
|
||||
struct nft_xt_match_priv *priv = nft_expr_priv(e);
|
||||
|
||||
return __nft_match_dump(skb, e, priv->info);
|
||||
}
|
||||
|
||||
static int nft_match_validate(const struct nft_ctx *ctx,
|
||||
const struct nft_expr *expr,
|
||||
const struct nft_data **data)
|
||||
|
@ -637,6 +720,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
|
|||
{
|
||||
struct nft_xt *nft_match;
|
||||
struct xt_match *match;
|
||||
unsigned int matchsize;
|
||||
char *mt_name;
|
||||
u32 rev, family;
|
||||
int err;
|
||||
|
@ -654,13 +738,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
|
|||
list_for_each_entry(nft_match, &nft_match_list, head) {
|
||||
struct xt_match *match = nft_match->ops.data;
|
||||
|
||||
if (nft_match_cmp(match, mt_name, rev, family)) {
|
||||
if (!try_module_get(match->me))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
nft_match->refcnt++;
|
||||
if (nft_match_cmp(match, mt_name, rev, family))
|
||||
return &nft_match->ops;
|
||||
}
|
||||
}
|
||||
|
||||
match = xt_request_find_match(family, mt_name, rev);
|
||||
|
@ -679,9 +758,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
|
|||
goto err;
|
||||
}
|
||||
|
||||
nft_match->refcnt = 1;
|
||||
nft_match->refcnt = 0;
|
||||
nft_match->ops.type = &nft_match_type;
|
||||
nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
|
||||
nft_match->ops.eval = nft_match_eval;
|
||||
nft_match->ops.init = nft_match_init;
|
||||
nft_match->ops.destroy = nft_match_destroy;
|
||||
|
@ -689,6 +767,18 @@ nft_match_select_ops(const struct nft_ctx *ctx,
|
|||
nft_match->ops.validate = nft_match_validate;
|
||||
nft_match->ops.data = match;
|
||||
|
||||
matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
|
||||
if (matchsize > NFT_MATCH_LARGE_THRESH) {
|
||||
matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
|
||||
|
||||
nft_match->ops.eval = nft_match_large_eval;
|
||||
nft_match->ops.init = nft_match_large_init;
|
||||
nft_match->ops.destroy = nft_match_large_destroy;
|
||||
nft_match->ops.dump = nft_match_large_dump;
|
||||
}
|
||||
|
||||
nft_match->ops.size = matchsize;
|
||||
|
||||
list_add(&nft_match->head, &nft_match_list);
|
||||
|
||||
return &nft_match->ops;
|
||||
|
@ -739,13 +829,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
|
|||
list_for_each_entry(nft_target, &nft_target_list, head) {
|
||||
struct xt_target *target = nft_target->ops.data;
|
||||
|
||||
if (nft_target_cmp(target, tg_name, rev, family)) {
|
||||
if (!try_module_get(target->me))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
nft_target->refcnt++;
|
||||
if (nft_target_cmp(target, tg_name, rev, family))
|
||||
return &nft_target->ops;
|
||||
}
|
||||
}
|
||||
|
||||
target = xt_request_find_target(family, tg_name, rev);
|
||||
|
@ -764,7 +849,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
|
|||
goto err;
|
||||
}
|
||||
|
||||
nft_target->refcnt = 1;
|
||||
nft_target->refcnt = 0;
|
||||
nft_target->ops.type = &nft_target_type;
|
||||
nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
|
||||
nft_target->ops.init = nft_target_init;
|
||||
|
@ -823,6 +908,32 @@ err_match:
|
|||
|
||||
static void __exit nft_compat_module_exit(void)
|
||||
{
|
||||
struct nft_xt *xt, *next;
|
||||
|
||||
/* list should be empty here, it can be non-empty only in case there
|
||||
* was an error that caused nft_xt expr to not be initialized fully
|
||||
* and noone else requested the same expression later.
|
||||
*
|
||||
* In this case, the lists contain 0-refcount entries that still
|
||||
* hold module reference.
|
||||
*/
|
||||
list_for_each_entry_safe(xt, next, &nft_target_list, head) {
|
||||
struct xt_target *target = xt->ops.data;
|
||||
|
||||
if (WARN_ON_ONCE(xt->refcnt))
|
||||
continue;
|
||||
module_put(target->me);
|
||||
kfree(xt);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(xt, next, &nft_match_list, head) {
|
||||
struct xt_match *match = xt->ops.data;
|
||||
|
||||
if (WARN_ON_ONCE(xt->refcnt))
|
||||
continue;
|
||||
module_put(match->me);
|
||||
kfree(xt);
|
||||
}
|
||||
nfnetlink_subsys_unregister(&nfnl_compat_subsys);
|
||||
nft_unregister_expr(&nft_target_type);
|
||||
nft_unregister_expr(&nft_match_type);
|
||||
|
|
|
@ -69,8 +69,16 @@ err1:
|
|||
return err;
|
||||
}
|
||||
|
||||
static void nft_immediate_destroy(const struct nft_ctx *ctx,
|
||||
const struct nft_expr *expr)
|
||||
static void nft_immediate_activate(const struct nft_ctx *ctx,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
|
||||
|
||||
return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
|
||||
}
|
||||
|
||||
static void nft_immediate_deactivate(const struct nft_ctx *ctx,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
|
||||
|
||||
|
@ -108,7 +116,8 @@ static const struct nft_expr_ops nft_imm_ops = {
|
|||
.size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
|
||||
.eval = nft_immediate_eval,
|
||||
.init = nft_immediate_init,
|
||||
.destroy = nft_immediate_destroy,
|
||||
.activate = nft_immediate_activate,
|
||||
.deactivate = nft_immediate_deactivate,
|
||||
.dump = nft_immediate_dump,
|
||||
.validate = nft_immediate_validate,
|
||||
};
|
||||
|
|
|
@@ -183,6 +183,9 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
	struct xt_match *m;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {

@@ -229,6 +232,9 @@ struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
	struct xt_target *t;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {

@@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);
	skb_reset_network_header(skb);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM) {
		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (unlikely(offset < 0))
			goto out_free;
	} else if (reserve) {
		skb_push(skb, reserve);
	}

	/* Returns -EFAULT on error */

@@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
		case htons(ETH_P_8021AD):
			break;
		default:
			if (exists)
				tcf_idr_release(*a, bind);
			return -EPROTONOSUPPORT;
		}
	} else {

@@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

@@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

@@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
		if (child != &noop_qdisc)
			qdisc_hash_add(child, true);
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])

@ -245,40 +245,45 @@ out:
|
|||
static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
|
||||
struct nlattr *tb[])
|
||||
{
|
||||
char *string, *ibname = NULL;
|
||||
int rc = 0;
|
||||
char *string, *ibname;
|
||||
int rc;
|
||||
|
||||
memset(pnetelem, 0, sizeof(*pnetelem));
|
||||
INIT_LIST_HEAD(&pnetelem->list);
|
||||
if (tb[SMC_PNETID_NAME]) {
|
||||
string = (char *)nla_data(tb[SMC_PNETID_NAME]);
|
||||
if (!smc_pnetid_valid(string, pnetelem->pnet_name)) {
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
if (tb[SMC_PNETID_ETHNAME]) {
|
||||
string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
|
||||
pnetelem->ndev = dev_get_by_name(net, string);
|
||||
if (!pnetelem->ndev)
|
||||
return -ENOENT;
|
||||
}
|
||||
if (tb[SMC_PNETID_IBNAME]) {
|
||||
ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
|
||||
ibname = strim(ibname);
|
||||
pnetelem->smcibdev = smc_pnet_find_ib(ibname);
|
||||
if (!pnetelem->smcibdev) {
|
||||
rc = -ENOENT;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
if (tb[SMC_PNETID_IBPORT]) {
|
||||
pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
|
||||
if (pnetelem->ib_port > SMC_MAX_PORTS) {
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
rc = -EINVAL;
|
||||
if (!tb[SMC_PNETID_NAME])
|
||||
goto error;
|
||||
string = (char *)nla_data(tb[SMC_PNETID_NAME]);
|
||||
if (!smc_pnetid_valid(string, pnetelem->pnet_name))
|
||||
goto error;
|
||||
|
||||
rc = -EINVAL;
|
||||
if (!tb[SMC_PNETID_ETHNAME])
|
||||
goto error;
|
||||
rc = -ENOENT;
|
||||
string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
|
||||
pnetelem->ndev = dev_get_by_name(net, string);
|
||||
if (!pnetelem->ndev)
|
||||
goto error;
|
||||
|
||||
rc = -EINVAL;
|
||||
if (!tb[SMC_PNETID_IBNAME])
|
||||
goto error;
|
||||
rc = -ENOENT;
|
||||
ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
|
||||
ibname = strim(ibname);
|
||||
pnetelem->smcibdev = smc_pnet_find_ib(ibname);
|
||||
if (!pnetelem->smcibdev)
|
||||
goto error;
|
||||
|
||||
rc = -EINVAL;
|
||||
if (!tb[SMC_PNETID_IBPORT])
|
||||
goto error;
|
||||
pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
|
||||
if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
|
||||
goto error;
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
|
@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
|
|||
void *hdr;
|
||||
int rc;
|
||||
|
||||
if (!info->attrs[SMC_PNETID_NAME])
|
||||
return -EINVAL;
|
||||
pnetelem = smc_pnet_find_pnetid(
|
||||
(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
|
||||
if (!pnetelem)
|
||||
|
@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
|
|||
|
||||
static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
|
||||
{
|
||||
if (!info->attrs[SMC_PNETID_NAME])
|
||||
return -EINVAL;
|
||||
return smc_pnet_remove_by_pnetid(
|
||||
(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
|
||||
}
|
||||
|
|
|
@ -680,7 +680,6 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
|
|||
struct scatterlist *sgin = &sgin_arr[0];
|
||||
struct strp_msg *rxm = strp_msg(skb);
|
||||
int ret, nsg = ARRAY_SIZE(sgin_arr);
|
||||
char aad_recv[TLS_AAD_SPACE_SIZE];
|
||||
struct sk_buff *unused;
|
||||
|
||||
ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
|
||||
|
@ -698,13 +697,13 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
|
|||
}
|
||||
|
||||
sg_init_table(sgin, nsg);
|
||||
sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv));
|
||||
sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
|
||||
|
||||
nsg = skb_to_sgvec(skb, &sgin[1],
|
||||
rxm->offset + tls_ctx->rx.prepend_size,
|
||||
rxm->full_len - tls_ctx->rx.prepend_size);
|
||||
|
||||
tls_make_aad(aad_recv,
|
||||
tls_make_aad(ctx->rx_aad_ciphertext,
|
||||
rxm->full_len - tls_ctx->rx.overhead_size,
|
||||
tls_ctx->rx.rec_seq,
|
||||
tls_ctx->rx.rec_seq_size,
|
||||
|
@ -803,12 +802,12 @@ int tls_sw_recvmsg(struct sock *sk,
|
|||
if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
|
||||
likely(!(flags & MSG_PEEK))) {
|
||||
struct scatterlist sgin[MAX_SKB_FRAGS + 1];
|
||||
char unused[21];
|
||||
int pages = 0;
|
||||
|
||||
zc = true;
|
||||
sg_init_table(sgin, MAX_SKB_FRAGS + 1);
|
||||
sg_set_buf(&sgin[0], unused, 13);
|
||||
sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
|
||||
TLS_AAD_SPACE_SIZE);
|
||||
|
||||
err = zerocopy_from_iter(sk, &msg->msg_iter,
|
||||
to_copy, &pages,
|
||||
|
|
|
@@ -2035,7 +2035,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
		return -EINVAL;

	obj = bpf_object__open(attr->file);
	if (IS_ERR(obj))
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {

@@ -11713,6 +11713,11 @@ static void get_unpriv_disabled()
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);