Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix memory leak in nftables, from Liping Zhang.

 2) Need to check result of vlan_insert_tag() in batman-adv otherwise we risk NULL skb derefs, from Sven Eckelmann.

 3) Check for dev_alloc_skb() failures in cfg80211, from Gregory Greenman.

 4) Handle properly when we have ppp_unregister_channel() happening in parallel with ppp_connect_channel(), from WANG Cong.

 5) Fix DCCP deadlock, from Eric Dumazet.

 6) Bail out properly in UDP if sk_filter() truncates the packet to be smaller than even the space that the protocol headers need. From Michal Kubecek.

 7) Similarly for rose, dccp, and sctp, from Willem de Bruijn.

 8) Make TCP challenge ACKs less predictable, from Eric Dumazet.

 9) Fix infinite loop in bgmac_dma_tx_add() from Florian Fainelli.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  packet: propagate sock_cmsg_send() error
  net/mlx5e: Fix del vxlan port command buffer memset
  packet: fix second argument of sock_tx_timestamp()
  net: switchdev: change ageing_time type to clock_t
  Update maintainer for EHEA driver.
  net/mlx4_en: Add resilience in low memory systems
  net/mlx4_en: Move filters cleanup to a proper location
  sctp: load transport header after sk_filter
  net/sched/sch_htb: clamp xstats tokens to fit into 32-bit int
  net: cavium: liquidio: Avoid dma_unmap_single on uninitialized ndata
  net: nb8800: Fix SKB leak in nb8800_receive()
  et131x: Fix logical vs bitwise check in et131x_tx_timeout()
  vlan: use a valid default mtu value for vlan over macsec
  net: bgmac: Fix infinite loop in bgmac_dma_tx_add()
  mlxsw: spectrum: Prevent invalid ingress buffer mapping
  mlxsw: spectrum: Prevent overwrite of DCB capability fields
  mlxsw: spectrum: Don't emit errors when PFC is disabled
  mlxsw: spectrum: Indicate support for autonegotiation
  mlxsw: spectrum: Force link training according to admin state
  r8152: add MODULE_VERSION
  ...
commit 107df03203

@@ -4476,7 +4476,7 @@ S: Orphan
F: fs/efs/

EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
M: Douglas Miller <dougmill@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ibm/ehea/

@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
if (err < 0)
return err;

return register_netdevice(bond_dev);
err = register_netdevice(bond_dev);

netif_carrier_off(bond_dev);

return err;
}

static size_t bond_get_size(const struct net_device *bond_dev)

@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
unsigned long flags;

/* If the device is closed, ignore the timeout */
if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;

/* Any nonrecoverable hardware error?

@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
dev_kfree_skb(skb);
return;
}

@@ -231,7 +231,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);

while (i > 0) {
while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);

@@ -1591,7 +1591,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
u16 start = eeprom->offset, length = eeprom->len;
int rc;
int rc = 0;

memset(data, 0, eeprom->len);

@@ -2821,7 +2821,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
goto lio_xmit_failed;
goto lio_xmit_dma_failed;
}

cmdsetup.s.gather = 1;

@@ -2892,7 +2892,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
goto lio_xmit_dma_failed;

netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

@@ -2906,12 +2906,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)

return NETDEV_TX_OK;

lio_xmit_dma_failed:
dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}

@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry;
void *dest;

if (skb_put_padto(skb, ETHOC_ZLEN)) {
dev->stats.tx_errors++;
goto out_no_free;
}

if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;

@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
out_no_free:
return NETDEV_TX_OK;
}

@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
goto error;
goto free;
}

if (netdev->mem_end) {

@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
goto error;
goto free;
}
} else {
/* Allocate buffer memory */

@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
goto error;
goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;

@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
goto error;
goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */

@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
goto error;
goto free;
}

/* Allow the platform setup code to pass in a MAC address. */

@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);

/* Tx fifo reset sequence */

@@ -75,6 +75,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

@@ -89,6 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);

@@ -469,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);

netif_start_queue(netdev);
netif_tx_start_all_queues(netdev);

return 0;

bounce_map_failed:

@@ -519,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);

netif_stop_queue(netdev);
netif_tx_stop_all_queues(netdev);

if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {

@@ -1212,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
goto reg_failed;
}

scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
if (scrq->irq == NO_IRQ) {
dev_err(dev, "Error mapping irq\n");
goto map_irq_failed;
}

scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;

@@ -1230,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter

return scrq;

map_irq_failed:
do {
rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
adapter->vdev->unit_address,
scrq->crq_num);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);

@@ -1256,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}

@@ -1267,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}

@@ -1276,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->requested_caps = 0;
}

static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
int i;

if (adapter->tx_scrq) {
for (i = 0; i < adapter->req_tx_queues; i++)
if (adapter->tx_scrq[i])
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
adapter->tx_scrq = NULL;
}

if (adapter->rx_scrq) {
for (i = 0; i < adapter->req_rx_queues; i++)
if (adapter->rx_scrq[i])
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
adapter->rx_scrq = NULL;
}

adapter->requested_caps = 0;
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{

@@ -1395,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_sub_crq_queue *scrq;
int i = 0, j = 0;
int rc = 0;

for (i = 0; i < adapter->req_tx_queues; i++) {
scrq = adapter->tx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

if (scrq->irq == NO_IRQ) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_tx_irq_failed;
}

rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
0, "ibmvnic_tx", scrq);

if (rc) {
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
scrq->irq, rc);
irq_dispose_mapping(scrq->irq);
goto req_rx_irq_failed;
}
}

for (i = 0; i < adapter->req_rx_queues; i++) {
scrq = adapter->rx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
if (scrq->irq == NO_IRQ) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
}
rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
0, "ibmvnic_rx", scrq);
if (rc) {
dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
scrq->irq, rc);
irq_dispose_mapping(scrq->irq);
goto req_rx_irq_failed;
}
}
return rc;

req_rx_irq_failed:
for (j = 0; j < i; j++)
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
i = adapter->req_tx_queues;
req_tx_irq_failed:
for (j = 0; j < i; j++)
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
release_sub_crqs_no_irqs(adapter);
return rc;
}

static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;

@@ -1403,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
union ibmvnic_crq crq;
int total_queues;
int more = 0;
int i, j;
int rc;
int i;

if (!retry) {
/* Sub-CRQ entries are 32 byte long */

@@ -1483,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
0, "ibmvnic_tx", adapter->tx_scrq[i]);
if (rc) {
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
adapter->tx_scrq[i]->irq, rc);
goto req_tx_irq_failed;
}
}

adapter->rx_scrq = kcalloc(adapter->req_rx_queues,

@@ -1500,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
0, "ibmvnic_rx", adapter->rx_scrq[i]);
if (rc) {
dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
adapter->rx_scrq[i]->irq, rc);
goto req_rx_irq_failed;
}
}

memset(&crq, 0, sizeof(crq));

@@ -1559,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)

return;

req_rx_irq_failed:
for (j = 0; j < i; j++)
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
i = adapter->req_tx_queues;
req_tx_irq_failed:
for (j = 0; j < i; j++)
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;

@@ -2348,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
release_sub_crqs(adapter);
release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
complete(&adapter->init_done);
init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",

@@ -2659,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,

out:
if (atomic_read(&adapter->running_cap_queries) == 0)
complete(&adapter->init_done);
init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}

@@ -3202,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
if (rc == 0)
send_version_xchg(adapter);
if (!rc)
schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;

@@ -3555,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
.release = single_release,
};

static void handle_crq_init_rsp(struct work_struct *work)
{
struct ibmvnic_adapter *adapter = container_of(work,
struct ibmvnic_adapter,
vnic_crq_init);
struct device *dev = &adapter->vdev->dev;
struct net_device *netdev = adapter->netdev;
unsigned long timeout = msecs_to_jiffies(30000);
int rc;

send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Passive init timeout\n");
goto task_failed;
}

do {
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);

reinit_completion(&adapter->init_done);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Passive init timeout\n");
goto task_failed;
}
}
} while (adapter->renegotiate);
rc = init_sub_crq_irqs(adapter);

if (rc)
goto task_failed;

netdev->real_num_tx_queues = adapter->req_tx_queues;

rc = register_netdev(netdev);
if (rc) {
dev_err(dev,
"failed to register netdev rc=%d\n", rc);
goto register_failed;
}
dev_info(dev, "ibmvnic registered\n");

return;

register_failed:
release_sub_crqs(adapter);
task_failed:
dev_err(dev, "Passive initialization was not successful\n");
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;

@@ -3593,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);

INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);

spin_lock_init(&adapter->stats_lock);

rc = ibmvnic_init_crq_queue(adapter);

@@ -3635,30 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_send_crq_init(adapter);

init_completion(&adapter->init_done);
wait_for_completion(&adapter->init_done);
if (!wait_for_completion_timeout(&adapter->init_done, timeout))
return 0;

do {
adapter->renegotiate = false;

init_sub_crqs(adapter, 0);
reinit_completion(&adapter->init_done);
wait_for_completion(&adapter->init_done);

if (adapter->renegotiate) {
release_sub_crqs(adapter);
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);

reinit_completion(&adapter->init_done);
wait_for_completion(&adapter->init_done);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout))
return 0;
}
} while (adapter->renegotiate);

/* if init_sub_crqs is partially successful, retry */
while (!adapter->tx_scrq || !adapter->rx_scrq) {
init_sub_crqs(adapter, 1);

reinit_completion(&adapter->init_done);
wait_for_completion(&adapter->init_done);
rc = init_sub_crq_irqs(adapter);
if (rc) {
dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
goto free_debugfs;
}

netdev->real_num_tx_queues = adapter->req_tx_queues;

@@ -3666,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
goto free_debugfs;
goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");

return 0;

free_sub_crqs:
release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);

@@ -1045,4 +1045,6 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;

struct work_struct vnic_crq_init;
};

@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!vsi || !macaddr)
return NULL;

/* Do not allow broadcast filter to be added since broadcast filter
* is added as part of add VSI for any newly created VSI except
* FDIR VSI
*/
if (is_broadcast_ether_addr(macaddr))
return NULL;

f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);

@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret, pf->hw.aq.asq_last_status);
}
}
aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
vsi->seid,
cur_promisc, NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
}
out:
/* if something went wrong then set the changed flag so we try again */

@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
* @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;

@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)

q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
cpumask_set_cpu(cpu, &q_vector->affinity_mask);

if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);

@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
int err;
int err, v_idx, num_q_vectors, current_cpu;

/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)

@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;

current_cpu = cpumask_first(cpu_online_mask);

for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
err = i40e_vsi_alloc_q_vector(vsi, v_idx);
err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
current_cpu = cpumask_next(current_cpu, cpu_online_mask);
if (unlikely(current_cpu >= nr_cpu_ids))
current_cpu = cpumask_first(cpu_online_mask);
}

return 0;

@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;

@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
/* Except FDIR VSI, for all othet VSI set the broadcast filter */
if (vsi->type != I40E_VSI_FDIR) {
aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
if (aq_ret) {
ret = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}

spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */

@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;

@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;

/* The hardware supported by this driver does not validate outer
* checksums for tunneled VXLAN or GENEVE frames. I don't agree
* with it but the specification states that you "MAY validate", it
* doesn't make it a hard requirement so if we have validated the
* inner checksum report CHECKSUM_UNNECESSARY.
/* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
I40E_RX_PTYPE_INNER_PROT_UDP |
I40E_RX_PTYPE_INNER_PROT_SCTP))
tunnel = true;
if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
skb->csum_level = 1;

skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = tunnel ? 1 : 0;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
case I40E_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* fall though */
default:
break;
}

return;

@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;

@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;

/* The hardware supported by this driver does not validate outer
* checksums for tunneled VXLAN or GENEVE frames. I don't agree
* with it but the specification states that you "MAY validate", it
* doesn't make it a hard requirement so if we have validated the
* inner checksum report CHECKSUM_UNNECESSARY.
/* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
I40E_RX_PTYPE_INNER_PROT_UDP |
I40E_RX_PTYPE_INNER_PROT_SCTP))
tunnel = true;
if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
skb->csum_level = 1;

skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = tunnel ? 1 : 0;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
case I40E_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* fall though */
default:
break;
}

return;

@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));

return 0;
return min(work_done, budget - 1);
}

/**

@@ -244,7 +244,7 @@
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;

@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size == priv->tx_ring[0]->size)
return 0;

tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;

mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.tx_ring_size = tx_size;
new_prof.rx_ring_size = rx_size;
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
if (err)
goto out;

if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}

mlx4_en_free_resources(priv);
mlx4_en_safe_replace_resources(priv, tmp);

priv->prof->tx_ring_size = tx_size;
priv->prof->rx_ring_size = rx_size;

err = mlx4_en_alloc_resources(priv);
if (err) {
en_err(priv, "Failed reallocating port resources\n");
goto out;
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)

@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
}

err = mlx4_en_moderation_update(priv);

out:
kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}

@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;

@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;

tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;

mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
new_prof.rx_ring_num = channel->rx_count;

err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
if (err)
goto out;

if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}

mlx4_en_free_resources(priv);

priv->num_tx_rings_p_up = channel->tx_count;
priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
priv->rx_ring_num = channel->rx_count;

err = mlx4_en_alloc_resources(priv);
if (err) {
en_err(priv, "Failed reallocating port resources\n");
goto out;
}
mlx4_en_safe_replace_resources(priv, tmp);

netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
}

err = mlx4_en_moderation_update(priv);

out:
kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}

@@ -1954,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;

@@ -1979,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)

}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;

@@ -2044,6 +2044,77 @@ static void mlx4_en_shutdown(struct net_device *dev)
rtnl_unlock();
}

static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src,
struct mlx4_en_port_profile *prof)
{
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
dst->tx_ring_num = prof->tx_ring_num;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
dst->port = src->port;
dst->dev = src->dev;
dst->prof = prof;
dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!dst->tx_ring)
return -ENOMEM;

dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!dst->tx_cq) {
kfree(dst->tx_ring);
return -ENOMEM;
}
return 0;
}

static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src)
{
memcpy(dst->rx_ring, src->rx_ring,
sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
memcpy(dst->rx_cq, src->rx_cq,
sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
sizeof(dst->hwtstamp_config));
dst->tx_ring_num = src->tx_ring_num;
dst->rx_ring_num = src->rx_ring_num;
dst->tx_ring = src->tx_ring;
dst->tx_cq = src->tx_cq;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}

int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
struct mlx4_en_port_profile *prof)
{
mlx4_en_copy_priv(tmp, priv, prof);

if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
"%s: Resource allocation failed, using previous configuration\n",
__func__);
kfree(tmp->tx_ring);
kfree(tmp->tx_cq);
return -ENOMEM;
}
return 0;
}

void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp)
{
mlx4_en_free_resources(priv);
mlx4_en_update_priv(priv, tmp);
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);

@@ -2080,6 +2151,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);

#ifdef CONFIG_RFS_ACCEL
mlx4_en_cleanup_filters(priv);
#endif

mlx4_en_free_resources(priv);

kfree(priv->tx_ring);

@@ -3124,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;

@@ -3140,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
return -EINVAL;
}

tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;

mutex_lock(&mdev->state_lock);

memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));

err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
if (err)
goto out;

if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}

mlx4_en_free_resources(priv);

en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
ts_config.rx_filter,
!!(features & NETIF_F_HW_VLAN_CTAG_RX));

priv->hwtstamp_config.tx_type = ts_config.tx_type;
priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
mlx4_en_safe_replace_resources(priv, tmp);

if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)

@@ -3186,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}

err = mlx4_en_alloc_resources(priv);
if (err) {
en_err(priv, "Failed reallocating port resources\n");
goto out;
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)

@@ -3199,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,

out:
mutex_unlock(&mdev->state_lock);
netdev_features_change(dev);
kfree(tmp);
if (!err)
netdev_features_change(dev);
return err;
}

@@ -514,9 +514,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
mlx4_en_cleanup_filters(priv);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,

@@ -353,12 +353,14 @@ struct mlx4_en_port_profile {
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
struct hwtstamp_config hwtstamp_config;
};

struct mlx4_en_profile {

@@ -623,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);

void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
struct mlx4_en_port_profile *prof);
void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp);

int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);

@@ -1348,6 +1348,11 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
goto err_close_channels;
}

/* FIXME: This is a W/A for tx timeout watch dog false alarm when
* polling for inactive tx queues.
*/
netif_tx_start_all_queues(priv->netdev);

kfree(cparam);
return 0;

@@ -1367,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
int i;

/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
* polling for inactive tx queues.
*/
netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);

for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);

@@ -2656,7 +2667,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);

@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];

memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));

MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);

@@ -2718,7 +2718,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
* Configures the switch priority to buffer table.
*/
#define MLXSW_REG_PPTB_ID 0x500B
#define MLXSW_REG_PPTB_LEN 0x0C
#define MLXSW_REG_PPTB_LEN 0x10

static const struct mlxsw_reg_info mlxsw_reg_pptb = {
.id = MLXSW_REG_PPTB_ID,

@@ -2784,6 +2784,13 @@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);

/* reg_pptb_prio_to_buff_msb
* Mapping of switch priority <i+8> to one of the allocated receive port
* buffers.
* Access: RW
*/
MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);

#define MLXSW_REG_PPTB_ALL_PRIO 0xFF

static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)

@@ -2792,6 +2799,14 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
mlxsw_reg_pptb_local_port_set(payload, local_port);
mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
}

static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
u8 buff)
{
mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}

/* PBMC - Port Buffer Management Control Register

@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
bool *p_is_up)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char paos_pl[MLXSW_REG_PAOS_LEN];
u8 oper_status;
int err;

mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
if (err)
return err;
oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{

@@ -1434,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,

cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
SUPPORTED_Pause | SUPPORTED_Asym_Pause |
SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);

@@ -1493,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
bool is_up;
int err;

speed = ethtool_cmd_speed(cmd);

@@ -1525,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
return err;
}

err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
if (err) {
netdev_err(dev, "Failed to get oper status");
return err;
}
if (!is_up)
if (!netif_running(dev))
return 0;

err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

@@ -194,7 +194,7 @@ static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)

mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}

@@ -103,7 +103,8 @@ static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,

mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);

return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}

@@ -249,6 +250,7 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
return err;

memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;

return 0;
}

@@ -351,7 +353,8 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;

if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}

@@ -371,6 +374,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
}

memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;

return 0;

|
|
@ -2601,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
|
|||
spin_lock_bh(&pn->all_channels_lock);
|
||||
list_del(&pch->list);
|
||||
spin_unlock_bh(&pn->all_channels_lock);
|
||||
put_net(pch->chan_net);
|
||||
pch->chan_net = NULL;
|
||||
|
||||
pch->file.dead = 1;
|
||||
wake_up_interruptible(&pch->file.rwait);
|
||||
|
@ -3136,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
|
|||
*/
|
||||
static void ppp_destroy_channel(struct channel *pch)
|
||||
{
|
||||
put_net(pch->chan_net);
|
||||
pch->chan_net = NULL;
|
||||
|
||||
atomic_dec(&channel_count);
|
||||
|
||||
if (!pch->file.dead) {
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <linux/mdio.h>
|
||||
#include <linux/usb/cdc.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/acpi.h>
|
||||
|
||||
/* Information for net-next */
|
||||
#define NETNEXT_VERSION "08"
|
||||
|
@ -460,6 +461,11 @@
|
|||
/* SRAM_IMPEDANCE */
|
||||
#define RX_DRIVING_MASK 0x6000
|
||||
|
||||
/* MAC PASSTHRU */
|
||||
#define AD_MASK 0xfee0
|
||||
#define EFUSE 0xcfdb
|
||||
#define PASS_THRU_MASK 0x1
|
||||
|
||||
enum rtl_register_content {
|
||||
_1000bps = 0x10,
|
||||
_100bps = 0x08,
|
||||
|
@ -1036,6 +1042,65 @@ out1:
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Devices containing RTL8153-AD can support a persistent
|
||||
* host system provided MAC address.
|
||||
* Examples of this are Dell TB15 and Dell WD15 docks
|
||||
*/
|
||||
static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *obj;
|
||||
int ret = -EINVAL;
|
||||
u32 ocp_data;
|
||||
unsigned char buf[6];
|
||||
|
||||
/* test for -AD variant of RTL8153 */
|
||||
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
|
||||
if ((ocp_data & AD_MASK) != 0x1000)
|
||||
return -ENODEV;
|
||||
|
||||
/* test for MAC address pass-through bit */
|
||||
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
|
||||
if ((ocp_data & PASS_THRU_MASK) != 1)
|
||||
return -ENODEV;
|
||||
|
||||
/* returns _AUXMAC_#AABBCCDDEEFF# */
|
||||
status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
|
||||
obj = (union acpi_object *)buffer.pointer;
|
||||
if (!ACPI_SUCCESS(status))
|
||||
return -ENODEV;
|
||||
if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
|
||||
netif_warn(tp, probe, tp->netdev,
|
||||
"Invalid buffer when reading pass-thru MAC addr: "
|
||||
"(%d, %d)\n",
|
||||
obj->type, obj->string.length);
|
||||
goto amacout;
|
||||
}
|
||||
if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
|
||||
strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
|
||||
netif_warn(tp, probe, tp->netdev,
|
||||
"Invalid header when reading pass-thru MAC addr\n");
|
||||
goto amacout;
|
||||
}
|
||||
ret = hex2bin(buf, obj->string.pointer + 9, 6);
|
||||
if (!(ret == 0 && is_valid_ether_addr(buf))) {
|
||||
netif_warn(tp, probe, tp->netdev,
|
||||
"Invalid MAC when reading pass-thru MAC addr: "
|
||||
"%d, %pM\n", ret, buf);
|
||||
ret = -EINVAL;
|
||||
goto amacout;
|
||||
}
|
||||
memcpy(sa->sa_data, buf, 6);
|
||||
ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
|
||||
netif_info(tp, probe, tp->netdev,
|
||||
"Using pass-thru MAC addr %pM\n", sa->sa_data);
|
||||
|
||||
amacout:
|
||||
kfree(obj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int set_ethernet_addr(struct r8152 *tp)
|
||||
{
|
||||
struct net_device *dev = tp->netdev;
|
||||
|
@ -1044,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
|
|||
|
||||
if (tp->version == RTL_VER_01)
|
||||
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
|
||||
else
|
||||
ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
|
||||
else {
|
||||
/* if this is not an RTL8153-AD, no eFuse mac pass thru set,
|
||||
* or system doesn't provide valid _SB.AMAC this will be
|
||||
* be expected to non-zero
|
||||
*/
|
||||
ret = vendor_mac_passthru_addr_read(tp, &sa);
|
||||
if (ret < 0)
|
||||
ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
netif_err(tp, probe, dev, "Get ether addr fail\n");
|
||||
|
@ -2296,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
|
|||
u32 ocp_data;
|
||||
u32 wolopts = 0;
|
||||
|
||||
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
|
||||
if (!(ocp_data & LAN_WAKE_EN))
|
||||
return 0;
|
||||
|
||||
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
|
||||
if (ocp_data & LINK_ON_WAKE_EN)
|
||||
wolopts |= WAKE_PHY;
|
||||
|
@ -2332,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
|
|||
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
|
||||
|
||||
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
|
||||
ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
|
||||
ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
|
||||
if (wolopts & WAKE_UCAST)
|
||||
ocp_data |= UWF_EN;
|
||||
if (wolopts & WAKE_BCAST)
|
||||
ocp_data |= BWF_EN;
|
||||
if (wolopts & WAKE_MCAST)
|
||||
ocp_data |= MWF_EN;
|
||||
if (wolopts & WAKE_ANY)
|
||||
ocp_data |= LAN_WAKE_EN;
|
||||
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
|
||||
|
||||
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
|
||||
|
@ -4359,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
|
|||
MODULE_AUTHOR(DRIVER_AUTHOR);
|
||||
MODULE_DESCRIPTION(DRIVER_DESC);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRIVER_VERSION);
|
||||
|
|
|
@@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

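The hunk above turns sk_filter() into a wrapper around the new sk_filter_trim_cap(), which lets a protocol cap how far a socket filter may truncate an skb (fix 6 in the pull message). A minimal sketch of how a datagram receive path could call it; the function below and its includes (<linux/udp.h>, <net/sock.h>) are illustrative assumptions, not code from this merge, while sk_filter_trim_cap() and sock_queue_rcv_skb() are the real kernel helpers:

	static int example_dgram_rcv(struct sock *sk, struct sk_buff *skb)
	{
		/* never let a BPF socket filter trim the skb below the
		 * transport header this protocol still needs to parse
		 */
		if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
			kfree_skb(skb);	/* filter rejected the packet */
			return -EPERM;
		}

		return sock_queue_rcv_skb(sk, skb);
	}
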
@@ -4145,6 +4145,13 @@ static inline void netif_keep_dst(struct net_device *dev)
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
return dev->priv_flags & IFF_MACSEC;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

@@ -284,6 +284,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}

/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
long timeout = (long)ct->timeout.expires - (long)jiffies;

return timeout > 0 ? timeout : 0;
}

struct kernel_param;

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);

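The nf_ct_expires() helper added above returns the remaining lifetime of a conntrack entry in jiffies, clamped at zero once the entry has expired. A hypothetical caller that reports the value in whole seconds, for illustration only (the function name is an assumption; seq_printf() and HZ are the standard kernel facilities):

	static void example_report_timeout(struct seq_file *s, const struct nf_conn *ct)
	{
		/* convert the remaining jiffies to seconds for display */
		seq_printf(s, "timeout=%lu ", nf_ct_expires(ct) / HZ);
	}
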
@@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
*/
void sock_gen_put(struct sock *sk);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
unsigned int trim_cap);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested)
{
return __sk_receive_skb(sk, skb, nested, 1);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{

@@ -60,7 +60,7 @@ struct switchdev_attr {
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
u32 ageing_time; /* BRIDGE_AGEING_TIME */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
} u;
};

@@ -146,10 +146,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,

static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* TODO: gotta make sure the underlying layer can handle it,
* maybe an IFF_VLAN_CAPABLE flag for devices?
*/
if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
unsigned int max_mtu = real_dev->mtu;

if (netif_reduces_vlan_mtu(real_dev))
max_mtu -= VLAN_HLEN;
if (max_mtu < new_mtu)
return -ERANGE;

dev->mtu = new_mtu;

@@ -118,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
unsigned int max_mtu;
__be16 proto;
int err;
@@ -144,9 +145,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;

max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
real_dev->mtu;
if (!tb[IFLA_MTU])
dev->mtu = real_dev->mtu;
else if (dev->mtu > real_dev->mtu)
dev->mtu = max_mtu;
else if (dev->mtu > max_mtu)
return -EINVAL;

err = vlan_changelink(dev, tb, data);
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
static void batadv_claim_release(struct kref *ref)
{
struct batadv_bla_claim *claim;
struct batadv_bla_backbone_gw *old_backbone_gw;

claim = container_of(ref, struct batadv_bla_claim, refcount);

batadv_backbone_gw_put(claim->backbone_gw);
spin_lock_bh(&claim->backbone_lock);
old_backbone_gw = claim->backbone_gw;
claim->backbone_gw = NULL;
spin_unlock_bh(&claim->backbone_lock);

spin_lock_bh(&old_backbone_gw->crc_lock);
old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&old_backbone_gw->crc_lock);

batadv_backbone_gw_put(old_backbone_gw);

kfree_rcu(claim, rcu);
}
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
break;
}

if (vid & BATADV_VLAN_HAS_TAG)
if (vid & BATADV_VLAN_HAS_TAG) {
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
if (!skb)
goto out;
}

skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_bla_backbone_gw *old_backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
bool remove_crc = false;
int hash_added;

ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
return;

ether_addr_copy(claim->addr, mac);
spin_lock_init(&claim->backbone_lock);
claim->vid = vid;
claim->lasttime = jiffies;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;

kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
"bla_add_claim(): changing ownership for %pM, vid %d\n",
mac, BATADV_PRINT_VID(vid));

spin_lock_bh(&claim->backbone_gw->crc_lock);
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&claim->backbone_gw->crc_lock);
batadv_backbone_gw_put(claim->backbone_gw);
remove_crc = true;
}
/* set (new) backbone gw */

/* replace backbone_gw atomically and adjust reference counters */
spin_lock_bh(&claim->backbone_lock);
old_backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
spin_unlock_bh(&claim->backbone_lock);

if (remove_crc) {
/* remove claim address from old backbone_gw */
spin_lock_bh(&old_backbone_gw->crc_lock);
old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&old_backbone_gw->crc_lock);
}

batadv_backbone_gw_put(old_backbone_gw);

/* add claim address to new backbone_gw */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&backbone_gw->crc_lock);
@@ -736,6 +765,26 @@ claim_free_ref:
batadv_claim_put(claim);
}

/**
* batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
* claim
* @claim: claim whose backbone_gw should be returned
*
* Return: valid reference to claim::backbone_gw
*/
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
struct batadv_bla_backbone_gw *backbone_gw;

spin_lock_bh(&claim->backbone_lock);
backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
spin_unlock_bh(&claim->backbone_lock);

return backbone_gw;
}

/**
* batadv_bla_del_claim - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_choose_claim, claim);
batadv_claim_put(claim); /* reference from the hash is gone */

spin_lock_bh(&claim->backbone_gw->crc_lock);
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&claim->backbone_gw->crc_lock);

/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
int now)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct hlist_head *head;
struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,

rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;
if (!batadv_compare_eth(claim->backbone_gw->orig,

if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
continue;
goto skip;

if (!batadv_has_timed_out(claim->lasttime,
BATADV_BLA_CLAIM_TIMEOUT))
continue;
goto skip;

batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,

purge_now:
batadv_handle_unclaim(bat_priv, primary_if,
claim->backbone_gw->orig,
backbone_gw->orig,
claim->addr, claim->vid);
skip:
batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
bool own_claim;
bool ret;

ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
}

/* if it is our own claim ... */
if (batadv_compare_eth(claim->backbone_gw->orig,
primary_if->net_dev->dev_addr)) {
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
own_claim = batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr);
batadv_backbone_gw_put(backbone_gw);

if (own_claim) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
bool client_roamed;
bool ret = false;

primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto allow;

/* check if we are responsible. */
if (batadv_compare_eth(claim->backbone_gw->orig,
primary_if->net_dev->dev_addr)) {
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
client_roamed = batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr);
batadv_backbone_gw_put(backbone_gw);

if (client_roamed) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)

rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
is_own = batadv_compare_eth(claim->backbone_gw->orig,
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);

is_own = batadv_compare_eth(backbone_gw->orig,
primary_addr);

spin_lock_bh(&claim->backbone_gw->crc_lock);
backbone_crc = claim->backbone_gw->crc;
spin_unlock_bh(&claim->backbone_gw->crc_lock);
spin_lock_bh(&backbone_gw->crc_lock);
backbone_crc = backbone_gw->crc;
spin_unlock_bh(&backbone_gw->crc_lock);
seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
claim->addr, BATADV_PRINT_VID(claim->vid),
claim->backbone_gw->orig,
backbone_gw->orig,
(is_own ? 'x' : ' '),
backbone_crc);

batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
@@ -1009,9 +1009,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
if (!skb_new)
goto out;

if (vid & BATADV_VLAN_HAS_TAG)
if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
if (!skb_new)
goto out;
}

skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
@@ -1089,9 +1092,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
*/
skb_reset_mac_header(skb_new);

if (vid & BATADV_VLAN_HAS_TAG)
if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
if (!skb_new)
goto out;
}

/* To preserve backwards compatibility, the node has choose the outgoing
* format based on the incoming request packet type. The assumption is
@@ -765,6 +765,8 @@ static void batadv_orig_node_release(struct kref *ref)
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_orig_node_vlan *vlan;
struct batadv_orig_ifinfo *last_candidate;

orig_node = container_of(ref, struct batadv_orig_node, refcount);

@@ -782,8 +784,21 @@ static void batadv_orig_node_release(struct kref *ref)
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_put(orig_ifinfo);
}

last_candidate = orig_node->last_bonding_candidate;
orig_node->last_bonding_candidate = NULL;
spin_unlock_bh(&orig_node->neigh_list_lock);

if (last_candidate)
batadv_orig_ifinfo_put(last_candidate);

spin_lock_bh(&orig_node->vlan_list_lock);
hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
hlist_del_rcu(&vlan->list);
batadv_orig_node_vlan_put(vlan);
}
spin_unlock_bh(&orig_node->vlan_list_lock);

/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
@@ -455,6 +455,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
return 0;
}

/**
* batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
* @orig_node: originator node whose bonding candidates should be replaced
* @new_candidate: new bonding candidate or NULL
*/
static void
batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
struct batadv_orig_ifinfo *new_candidate)
{
struct batadv_orig_ifinfo *old_candidate;

spin_lock_bh(&orig_node->neigh_list_lock);
old_candidate = orig_node->last_bonding_candidate;

if (new_candidate)
kref_get(&new_candidate->refcount);
orig_node->last_bonding_candidate = new_candidate;
spin_unlock_bh(&orig_node->neigh_list_lock);

if (old_candidate)
batadv_orig_ifinfo_put(old_candidate);
}

/**
* batadv_find_router - find a suitable router for this originator
* @bat_priv: the bat priv with all the soft interface information
@@ -562,10 +585,6 @@ next:
}
rcu_read_unlock();

/* last_bonding_candidate is reset below, remove the old reference. */
if (orig_node->last_bonding_candidate)
batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);

/* After finding candidates, handle the three cases:
* 1) there is a next candidate, use that
* 2) there is no next candidate, use the first of the list
@@ -574,21 +593,28 @@ next:
if (next_candidate) {
batadv_neigh_node_put(router);

/* remove references to first candidate, we don't need it. */
if (first_candidate) {
batadv_neigh_node_put(first_candidate_router);
batadv_orig_ifinfo_put(first_candidate);
}
kref_get(&next_candidate_router->refcount);
router = next_candidate_router;
orig_node->last_bonding_candidate = next_candidate;
batadv_last_bonding_replace(orig_node, next_candidate);
} else if (first_candidate) {
batadv_neigh_node_put(router);

/* refcounting has already been done in the loop above. */
kref_get(&first_candidate_router->refcount);
router = first_candidate_router;
orig_node->last_bonding_candidate = first_candidate;
batadv_last_bonding_replace(orig_node, first_candidate);
} else {
orig_node->last_bonding_candidate = NULL;
batadv_last_bonding_replace(orig_node, NULL);
}

/* cleanup of candidates */
if (first_candidate) {
batadv_neigh_node_put(first_candidate_router);
batadv_orig_ifinfo_put(first_candidate);
}

if (next_candidate) {
batadv_neigh_node_put(next_candidate_router);
batadv_orig_ifinfo_put(next_candidate);
}

return router;
@@ -424,8 +424,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node *orig_node;

orig_node = batadv_gw_get_selected_orig(bat_priv);
return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
orig_node, vid);
return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
BATADV_P_DATA, orig_node, vid);
}

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
@@ -330,7 +330,9 @@ struct batadv_orig_node {
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
u32 last_bcast_seqno;
struct hlist_head neigh_list;
/* neigh_list_lock protects: neigh_list and router */
/* neigh_list_lock protects: neigh_list, ifinfo_list,
* last_bonding_candidate and router
*/
spinlock_t neigh_list_lock;
struct hlist_node hash_entry;
struct batadv_priv *bat_priv;
@@ -1042,6 +1044,7 @@ struct batadv_bla_backbone_gw {
* @addr: mac address of claimed non-mesh client
* @vid: vlan id this client was detected on
* @backbone_gw: pointer to backbone gw claiming this client
* @backbone_lock: lock protecting backbone_gw pointer
* @lasttime: last time we heard of claim (locals only)
* @hash_entry: hlist node for batadv_priv_bla::claim_hash
* @refcount: number of contexts the object is used
@@ -1051,6 +1054,7 @@ struct batadv_bla_claim {
u8 addr[ETH_ALEN];
unsigned short vid;
struct batadv_bla_backbone_gw *backbone_gw;
spinlock_t backbone_lock; /* protects backbone_gw */
unsigned long lasttime;
struct hlist_node hash_entry;
struct rcu_head rcu;
@@ -53,9 +53,10 @@
#include <net/sock_reuseport.h>

/**
* sk_filter - run a packet through a socket filter
* sk_filter_trim_cap - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
* @cap: limit on how short the eBPF program may trim the packet
*
* Run the eBPF program and then cut skb->data to correct size returned by
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
@@ -64,7 +65,7 @@
* be accepted or -EPERM if the packet should be tossed.
*
*/
int sk_filter(struct sock *sk, struct sk_buff *skb)
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
@@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);

err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();

return err;
}
EXPORT_SYMBOL(sk_filter);
EXPORT_SYMBOL(sk_filter_trim_cap);

static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
@@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested, unsigned int trim_cap)
{
int rc = NET_RX_SUCCESS;

if (sk_filter(sk, skb))
if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;

skb->dev = NULL;
@@ -492,7 +493,7 @@ discard_and_relse:
kfree_skb(skb);
goto out;
}
EXPORT_SYMBOL(sk_receive_skb);
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
@@ -1938,6 +1939,10 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
sockc->tsflags |= tsflags;
break;
/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
case SCM_RIGHTS:
case SCM_CREDENTIALS:
break;
default:
return -EINVAL;
}
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}

@@ -527,17 +527,19 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));

local_bh_disable();
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);

if (net_xmit_eval(err) == 0) {
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
}
local_bh_enable();
out:
dst_release(dst);
dst_release(dst);
}

static void dccp_v4_reqsk_destructor(struct request_sock *req)
@@ -866,7 +868,7 @@ lookup:
goto discard_and_relse;
nf_reset(skb);

return sk_receive_skb(sk, skb, 1);
return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);

no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -732,7 +732,7 @@ lookup:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;

return sk_receive_skb(sk, skb, 1) ? -1 : 0;
return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;

no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;

if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
return -EINVAL;

nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
goto err_inval;

if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);

/* rfc5961 challenge ack rate limiting */
int sysctl_tcp_challenge_ack_limit = 100;
int sysctl_tcp_challenge_ack_limit = 1000;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
@@ -3421,6 +3421,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
return flag;
}

static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
u32 *last_oow_ack_time)
{
if (*last_oow_ack_time) {
s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
NET_INC_STATS(net, mib_idx);
return true; /* rate-limited: don't send yet! */
}
}

*last_oow_ack_time = tcp_time_stamp;

return false; /* not rate-limited: go ahead, send dupack now! */
}

/* Return true if we're currently rate-limiting out-of-window ACKs and
* thus shouldn't send a dupack right now. We rate-limit dupacks in
* response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3434,21 +3451,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
/* Data packets without SYNs are not likely part of an ACK loop. */
if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
!tcp_hdr(skb)->syn)
goto not_rate_limited;
return false;

if (*last_oow_ack_time) {
s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
NET_INC_STATS(net, mib_idx);
return true; /* rate-limited: don't send yet! */
}
}

*last_oow_ack_time = tcp_time_stamp;

not_rate_limited:
return false; /* not rate-limited: go ahead, send dupack now! */
return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}

/* RFC 5961 7 [ACK Throttling] */
@@ -3458,21 +3463,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
u32 now;
u32 count, now;

/* First check our per-socket dupack rate limit. */
if (tcp_oow_rate_limited(sock_net(sk), skb,
LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
&tp->last_oow_ack_time))
if (__tcp_oow_rate_limited(sock_net(sk),
LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
&tp->last_oow_ack_time))
return;

/* Then check the check host-wide RFC 5961 rate limit. */
/* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;

challenge_timestamp = now;
challenge_count = 0;
WRITE_ONCE(challenge_count, half +
prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
count = READ_ONCE(challenge_count);
if (count > 0) {
WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
@@ -1583,6 +1583,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

if (sk_filter(sk, skb))
goto drop;
if (unlikely(skb->len < sizeof(struct udphdr)))
goto drop;

udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
@@ -620,6 +620,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

if (sk_filter(sk, skb))
goto drop;
if (unlikely(skb->len < sizeof(struct udphdr)))
goto drop;

udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
@@ -1545,7 +1545,8 @@ error:
/*
* Set up receiving multicast socket over UDP
*/
static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
int ifindex)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
set_sock_size(sock->sk, 0, result);

get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
sock->sk->sk_bound_dev_if = ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
@@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
if (state == IP_VS_STATE_MASTER)
sock = make_send_sock(ipvs, id);
else
sock = make_receive_sock(ipvs, id);
sock = make_receive_sock(ipvs, id, dev->ifindex);
if (IS_ERR(sock)) {
result = PTR_ERR(sock);
goto outtinfo;
@@ -646,6 +646,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,

l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->allow_clash &&
!nfct_nat(ct) &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
@@ -1601,8 +1602,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
unsigned int nr_slots, i;
size_t sz;

if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;

BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));

if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;

sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
@@ -1724,9 +1724,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,

err = nf_tables_newexpr(ctx, &info, expr);
if (err < 0)
goto err2;
goto err3;

return expr;
err3:
kfree(expr);
err2:
module_put(info.ops->type->owner);
err1:
@@ -54,7 +54,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
const struct nf_conn_help *help;
const struct nf_conntrack_tuple *tuple;
const struct nf_conntrack_helper *helper;
long diff;
unsigned int state;

ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -94,10 +93,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return;
#endif
case NFT_CT_EXPIRATION:
diff = (long)jiffies - (long)ct->timeout.expires;
if (diff < 0)
diff = 0;
*dest = jiffies_to_msecs(diff);
*dest = jiffies_to_msecs(nf_ct_expires(ct));
return;
case NFT_CT_HELPER:
if (ct->master == NULL)
@@ -227,7 +227,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
skb->pkt_type = value;
break;
case NFT_META_NFTRACE:
skb->nf_trace = 1;
skb->nf_trace = !!value;
break;
default:
WARN_ON(1);
@@ -1927,13 +1927,11 @@ retry:
goto out_unlock;
}

sockc.tsflags = 0;
sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err)) {
err = -EINVAL;
if (unlikely(err))
goto out_unlock;
}
}

skb->protocol = proto;
@@ -2678,7 +2676,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
}

sockc.tsflags = 0;
sockc.tsflags = po->sk.sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(&po->sk, msg, &sockc);
if (unlikely(err))
@@ -2881,7 +2879,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;

sockc.tsflags = 0;
sockc.tsflags = sk->sk_tsflags;
sockc.mark = sk->sk_mark;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
if (sock_queue_rcv_skb(sk, skb) == 0) {
if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
__sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
@@ -1140,8 +1140,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)

if (!cl->level && cl->un.leaf.q)
qlen = cl->un.leaf.q->q.qlen;
cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
INT_MIN, INT_MAX);
cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
INT_MIN, INT_MAX);

if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
@@ -127,8 +126,6 @@ int sctp_rcv(struct sk_buff *skb)
if (skb_linearize(skb))
goto discard_it;

sh = sctp_hdr(skb);

/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
@@ -230,7 +227,7 @@ int sctp_rcv(struct sk_buff *skb)
chunk->rcvr = rcvr;

/* Remember the SCTP header. */
chunk->sctp_hdr = sh;
chunk->sctp_hdr = sctp_hdr(skb);

/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
@@ -330,6 +330,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
return 0;
}

/* tipc_bearer_reset_all - reset all links on all bearers
*/
void tipc_bearer_reset_all(struct net *net)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b;
int i;

for (i = 0; i < MAX_BEARERS; i++) {
b = rcu_dereference_rtnl(tn->bearer_list[i]);
if (b)
tipc_reset_bearer(net, b);
}
}

/**
* bearer_disable
*
@@ -198,6 +198,7 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
struct tipc_media *tipc_media_find(const char *name);
void tipc_bearer_reset_all(struct net *net);
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
@@ -349,6 +349,8 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
u16 ack = snd_l->snd_nxt - 1;

snd_l->ackers--;
rcv_l->bc_peer_is_up = true;
rcv_l->state = LINK_ESTABLISHED;
tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
@@ -1559,7 +1561,12 @@ void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
if (!msg_peer_node_is_up(hdr))
return;

l->bc_peer_is_up = true;
/* Open when peer ackowledges our bcast init msg (pkt #1) */
if (msg_ack(hdr))
l->bc_peer_is_up = true;

if (!l->bc_peer_is_up)
return;

/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
@@ -1297,10 +1297,6 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id

rc = tipc_bcast_rcv(net, be->link, skb);

/* Broadcast link reset may happen at reassembly failure */
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_reset_links(n);

/* Broadcast ACKs are sent on a unicast link */
if (rc & TIPC_LINK_SND_BC_ACK) {
tipc_node_read_lock(n);
@@ -1320,6 +1316,17 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
spin_unlock_bh(&be->inputq2.lock);
tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
}

if (rc & TIPC_LINK_DOWN_EVT) {
/* Reception reassembly failure => reset all links to peer */
if (!tipc_link_is_up(be->link))
tipc_node_reset_links(n);

/* Retransmission failure => reset all links to all peers */
if (!tipc_link_is_up(tipc_bc_sndlink(net)))
tipc_bearer_reset_all(net);
}

tipc_node_put(n);
}
@@ -3487,16 +3487,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params.smps_mode = NL80211_SMPS_OFF;
}

params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
return -EOPNOTSUPP;

if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
params.acl = parse_acl_data(&rdev->wiphy, info);
if (IS_ERR(params.acl))
return PTR_ERR(params.acl);
}

params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
return -EOPNOTSUPP;

wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, &params);
if (!err) {
@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
* alignment since sizeof(struct ethhdr) is 14.
*/
frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
if (!frame)
return NULL;

skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);