Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "More fixes in the queue:

  1) Netfilter nat can erroneously register the device notifier twice,
     fix from Florian Westphal.

  2) Use after free in nf_tables, from Pablo Neira Ayuso.

  3) Parallel update of steering rule fix in mlx5 driver, from Eli
     Britstein.

  4) RX processing panic in lan743x, fix from Bryan Whitehead.

  5) Use before initialization of TCP_SKB_CB, fix from Christoph
     Paasch.

  6) Fix locking in SRIOV mode of mlx4 driver, from Jack Morgenstein.

  7) Fix TX stalls in lan743x due to mishandling of interrupt ACKing
     modes, from Bryan Whitehead.

  8) Fix infoleak in l2tp_ip6_recvmsg(), from Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
  pptp: dst_release sk_dst_cache in pptp_sock_destruct
  MAINTAINERS: GENET & SYSTEMPORT: Add internal Broadcom list
  l2tp: fix infoleak in l2tp_ip6_recvmsg()
  net/tls: Inform user space about send buffer availability
  net_sched: return correct value for *notify* functions
  lan743x: Fix TX Stall Issue
  net/mlx4_core: Fix qp mtt size calculation
  net/mlx4_core: Fix locking in SRIOV mode when switching between events and polling
  net/mlx4_core: Fix reset flow when in command polling mode
  mlxsw: minimal: Initialize base_mac
  mlxsw: core: Prevent duplication during QSFP module initialization
  net: dwmac-sun8i: fix a missing check of of_get_phy_mode
  net: sh_eth: fix a missing check of of_get_phy_mode
  net: 8390: fix potential NULL pointer dereferences
  net: fujitsu: fix a potential NULL pointer dereference
  net: qlogic: fix a potential NULL pointer dereference
  isdn: hfcpci: fix potential NULL pointer dereference
  Documentation: devicetree: add a new optional property for port mac address
  net: rocker: fix a potential NULL pointer dereference
  net: qlge: fix a potential NULL pointer dereference
  ...
commit f3ca4c55a6

diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -71,6 +71,10 @@ properties, described in binding documents:
 			  Documentation/devicetree/bindings/net/fixed-link.txt
 			  for details.
 
+- local-mac-address	: See
+			  Documentation/devicetree/bindings/net/ethernet.txt
+			  for details.
+
 Example
 
 The following example shows three switches on three MDIO busses,
@@ -97,6 +101,7 @@ linked into one DSA cluster.
 			port@1 {
 				reg = <1>;
 				label = "lan1";
+				local-mac-address = [00 00 00 00 00 00];
 			};
 
 			port@2 {
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -3204,6 +3204,7 @@ F:	drivers/phy/broadcom/phy-brcm-usb*
 BROADCOM GENET ETHERNET DRIVER
 M:	Doug Berger <opendmb@gmail.com>
 M:	Florian Fainelli <f.fainelli@gmail.com>
+L:	bcm-kernel-feedback-list@broadcom.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/genet/
@@ -3311,6 +3312,7 @@ F:	drivers/spi/spi-iproc-qspi.c
 
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:	Florian Fainelli <f.fainelli@gmail.com>
+L:	bcm-kernel-feedback-list@broadcom.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bcmsysport.*
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
@@ -2,8 +2,8 @@
 #ifndef _UAPI_ASM_SOCKET_H
 #define _UAPI_ASM_SOCKET_H
 
 #include <linux/posix_types.h>
 #include <asm/sockios.h>
 #include <asm/bitsperlong.h>
 
 /* For setsockopt(2) */
 /*
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
@@ -10,8 +10,8 @@
 #ifndef _UAPI_ASM_SOCKET_H
 #define _UAPI_ASM_SOCKET_H
 
 #include <linux/posix_types.h>
 #include <asm/sockios.h>
 #include <asm/bitsperlong.h>
 
 /*
  * For setsockopt(2)
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
@@ -2,8 +2,8 @@
 #ifndef _UAPI_ASM_SOCKET_H
 #define _UAPI_ASM_SOCKET_H
 
 #include <linux/posix_types.h>
 #include <asm/sockios.h>
 #include <asm/bitsperlong.h>
 
 /* For setsockopt(2) */
 #define SOL_SOCKET	0xffff
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
@@ -2,8 +2,8 @@
 #ifndef _ASM_SOCKET_H
 #define _ASM_SOCKET_H
 
 #include <linux/posix_types.h>
 #include <asm/sockios.h>
 #include <asm/bitsperlong.h>
 
 /* For setsockopt(2) */
 #define SOL_SOCKET	0xffff
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2032,10 +2032,19 @@ setup_hw(struct hfc_pci *hc)
 	hc->hw.fifos = buffer;
 	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
 	hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
+	if (unlikely(!hc->hw.pci_io)) {
+		printk(KERN_WARNING
+		       "HFC-PCI: Error in ioremap for PCI!\n");
+		pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos,
+				    hc->hw.dmahandle);
+		return 1;
+	}
+
 	printk(KERN_INFO
 	       "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
 	       (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
 	       (u_long) hc->hw.dmahandle, hc->irq, HZ);
 
 	/* enable memory mapped ports, disable busmaster */
 	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
 	hc->hw.int_m2 = 0;
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -289,6 +289,11 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
 
 	virt = ioremap(link->resource[2]->start,
 		       resource_size(link->resource[2]));
+	if (unlikely(!virt)) {
+		pcmcia_release_window(link, link->resource[2]);
+		return NULL;
+	}
+
 	for (i = 0; i < NR_INFO; i++) {
 		pcmcia_map_mem_page(link, link->resource[2],
 			hw_info[i].offset & ~(resource_size(link->resource[2])-1));
@@ -1423,6 +1428,11 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
 	/* Try scribbling on the buffer */
 	info->base = ioremap(link->resource[3]->start,
 			     resource_size(link->resource[3]));
+	if (unlikely(!info->base)) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
 	for (i = 0; i < (TX_PAGES<<8); i += 2)
 		__raw_writew((i>>1), info->base+offset+i);
 	udelay(100);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1211,6 +1211,11 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
 
 	sc = (struct octeon_soft_command *)
 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
+	if (!sc) {
+		netif_info(lio, rx_err, lio->netdev,
+			   "Failed to allocate soft command\n");
+		return -ENOMEM;
+	}
 
 	ncmd = (union octnet_cmd *)sc->virtdptr;
 
@@ -1684,6 +1689,11 @@ int liquidio_set_fec(struct lio *lio, int on_off)
 
 	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
 				       sizeof(struct oct_nic_seapi_resp), 0);
+	if (!sc) {
+		dev_err(&oct->pci_dev->dev,
+			"Failed to allocate soft command\n");
+		return -ENOMEM;
+	}
 
 	ncmd = sc->virtdptr;
 	resp = sc->virtrptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1192,6 +1192,11 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 	sc = (struct octeon_soft_command *)
 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
 					  16, 0);
+	if (!sc) {
+		netif_info(lio, rx_err, lio->netdev,
+			   "Failed to allocate octeon_soft_command\n");
+		return;
+	}
 
 	ncmd = (union octnet_cmd *)sc->virtdptr;
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2234,6 +2234,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
 							WQ_MEM_RECLAIM,
 							nic->vf_id);
+	if (!nic->nicvf_rx_mode_wq) {
+		err = -ENOMEM;
+		dev_err(dev, "Failed to allocate work queue\n");
+		goto err_unregister_interrupts;
+	}
+
 	INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
 	spin_lock_init(&nic->rx_mode_wq_lock);
 	mutex_init(&nic->rx_mode_mtx);
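
Several of the NULL-check fixes in this pull (nicvf above, and the qla3xxx, qlge, and rocker hunks below) share the kernel's goto-unwind shape: test each acquisition, and on failure jump to a label that releases everything acquired so far, in reverse order. A minimal userspace sketch of that shape, with hypothetical resources rather than any driver's real ones:

	#include <stdlib.h>

	/* Acquire two resources; on any failure, unwind whatever was
	 * already acquired, in reverse order, before returning an error. */
	int setup(void **a_out, void **b_out)
	{
		void *a, *b;

		a = malloc(64);
		if (!a)
			goto err_out;
		b = malloc(64);
		if (!b)
			goto err_free_a;	/* unwind in reverse order */

		*a_out = a;
		*b_out = b;
		return 0;

	err_free_a:
		free(a);
	err_out:
		return -1;
	}
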
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -547,6 +547,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
 		return -1;
 
 	base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
+	if (!base) {
+		pcmcia_release_window(link, link->resource[2]);
+		return -ENOMEM;
+	}
+
 	pcmcia_map_mem_page(link, link->resource[2], 0);
 
 	/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 	if (!priv->cmd.context)
 		return -ENOMEM;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
 		priv->cmd.context[i].token = i;
@@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 	down(&priv->cmd.poll_sem);
 	priv->cmd.use_events = 1;
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 
 	return err;
 }
@@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	priv->cmd.use_events = 0;
 
@@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
 		down(&priv->cmd.event_sem);
 
 	kfree(priv->cmd.context);
+	priv->cmd.context = NULL;
 
 	up(&priv->cmd.poll_sem);
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
 	int total_pages;
 	int total_mem;
 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+	int tot;
 
 	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
 	total_mem = sq_size + rq_size;
-	total_pages =
-		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
-				   page_shift);
+	tot = (total_mem + (page_offset << 6)) >> page_shift;
+	total_pages = !tot ? 1 : roundup_pow_of_two(tot);
 
 	return total_pages;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
+		if (!netif_is_rxfh_configured(priv->netdev))
+			mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+						      MLX5E_INDIR_RQT_SIZE, count);
 		goto out;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	int ret, pf_num;
+	unsigned int fn;
+	int ret;
 
-	ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
-	if (ret)
-		return ret;
+	fn = PCI_FUNC(priv->mdev->pdev->devfn);
+	if (fn >= MLX5_MAX_PORTS)
+		return -EOPNOTSUPP;
 
 	if (rep->vport == MLX5_VPORT_UPLINK)
-		ret = snprintf(buf, len, "p%d", pf_num);
+		ret = snprintf(buf, len, "p%d", fn);
 	else
-		ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
+		ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);
 
 	if (ret >= len)
 		return -EOPNOTSUPP;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1295,8 +1295,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 	skb->protocol = *((__be16 *)(skb->data));
 
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+	if (netdev->features & NETIF_F_RXCSUM) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		stats->csum_complete++;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		stats->csum_none++;
+	}
 
 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
 		skb_hwtstamps(skb)->hwtstamp =
@@ -1315,7 +1321,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 	skb->dev = netdev;
 
-	stats->csum_complete++;
 	stats->packets++;
 	stats->bytes += cqe_bcnt;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	u64 node_guid;
 	int err = 0;
 
-	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
@@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
 	struct mlx5_vport *evport;
 
-	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
@@ -2297,19 +2297,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 				u32 max_rate, u32 min_rate)
 {
-	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
-	bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
-					fw_max_bw_share >= MLX5_MIN_BW_SHARE;
-	bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
 	struct mlx5_vport *evport;
+	u32 fw_max_bw_share;
 	u32 previous_min_rate;
 	u32 divider;
+	bool min_rate_supported;
+	bool max_rate_supported;
 	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
+	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+
 	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
 		return -EOPNOTSUPP;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -263,10 +263,11 @@ static void nested_down_write_ref_node(struct fs_node *node,
 	}
 }
 
-static void down_write_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node, bool locked)
 {
 	if (node) {
-		down_write(&node->lock);
+		if (!locked)
+			down_write(&node->lock);
 		refcount_inc(&node->refcount);
 	}
 }
@@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node)
 	up_read(&node->lock);
 }
 
-static void up_write_ref_node(struct fs_node *node)
+static void up_write_ref_node(struct fs_node *node, bool locked)
 {
 	refcount_dec(&node->refcount);
-	up_write(&node->lock);
+	if (!locked)
+		up_write(&node->lock);
 }
 
-static void tree_put_node(struct fs_node *node)
+static void tree_put_node(struct fs_node *node, bool locked)
 {
 	struct fs_node *parent_node = node->parent;
 
@@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node)
 			/* Only root namespace doesn't have parent and we just
 			 * need to free its node.
 			 */
-			down_write_ref_node(parent_node);
+			down_write_ref_node(parent_node, locked);
 			list_del_init(&node->list);
 			if (node->del_sw_func)
 				node->del_sw_func(node);
-			up_write_ref_node(parent_node);
+			up_write_ref_node(parent_node, locked);
 		} else {
 			kfree(node);
 		}
 		node = NULL;
 	}
 	if (!node && parent_node)
-		tree_put_node(parent_node);
+		tree_put_node(parent_node, locked);
 }
 
-static int tree_remove_node(struct fs_node *node)
+static int tree_remove_node(struct fs_node *node, bool locked)
 {
 	if (refcount_read(&node->refcount) > 1) {
 		refcount_dec(&node->refcount);
 		return -EEXIST;
 	}
-	tree_put_node(node);
+	tree_put_node(node, locked);
 	return 0;
 }
 
@@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node)
 	kfree(ft);
 }
 
-static void del_sw_hw_rule(struct fs_node *node)
+static void modify_fte(struct fs_fte *fte)
 {
 	struct mlx5_flow_root_namespace *root;
-	struct mlx5_flow_rule *rule;
 	struct mlx5_flow_table *ft;
 	struct mlx5_flow_group *fg;
-	struct fs_fte *fte;
-	int modify_mask;
-	struct mlx5_core_dev *dev = get_dev(node);
+	struct mlx5_core_dev *dev;
 	int err;
-	bool update_fte = false;
 
+	fs_get_obj(fg, fte->node.parent);
+	fs_get_obj(ft, fg->node.parent);
+	dev = get_dev(&fte->node);
+
+	root = find_root(&ft->node);
+	err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+	if (err)
+		mlx5_core_warn(dev,
+			       "%s can't del rule fg id=%d fte_index=%d\n",
+			       __func__, fg->id, fte->index);
+	fte->modify_mask = 0;
+}
+
+static void del_sw_hw_rule(struct fs_node *node)
+{
+	struct mlx5_flow_rule *rule;
+	struct fs_fte *fte;
+
 	fs_get_obj(rule, node);
 	fs_get_obj(fte, rule->node.parent);
-	fs_get_obj(fg, fte->node.parent);
-	fs_get_obj(ft, fg->node.parent);
 	trace_mlx5_fs_del_rule(rule);
 	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		mutex_lock(&rule->dest_attr.ft->lock);
@@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node)
 
 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
-			      BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+		fte->modify_mask |=
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
-		update_fte = true;
 		goto out;
 	}
 
 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-		update_fte = true;
+		fte->modify_mask |=
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 	}
 out:
-	root = find_root(&ft->node);
-	if (update_fte && fte->dests_size) {
-		err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
-		if (err)
-			mlx5_core_warn(dev,
-				       "%s can't del rule fg id=%d fte_index=%d\n",
-				       __func__, fg->id, fte->index);
-	}
 	kfree(rule);
 }
 
@@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node)
 			mlx5_core_warn(dev,
 				       "flow steering can't delete fte in index %d of flow group id %d\n",
 				       fte->index, fg->id);
+		node->active = 0;
 	}
 }
 
@@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
 	fte->node.type = FS_TYPE_FLOW_ENTRY;
 	fte->action = *flow_act;
 
-	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+	tree_init_node(&fte->node, NULL, del_sw_fte);
 
 	return fte;
 }
@@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	fs_get_obj(fte, rule->node.parent);
 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
 		return -EINVAL;
-	down_write_ref_node(&fte->node);
+	down_write_ref_node(&fte->node, false);
 	fs_get_obj(fg, fte->node.parent);
 	fs_get_obj(ft, fg->node.parent);
 
@@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	root = find_root(&ft->node);
 	err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
 				     modify_mask, fte);
-	up_write_ref_node(&fte->node);
+	up_write_ref_node(&fte->node, false);
 
 	return err;
 }
@@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 	if (err)
 		goto destroy_ft;
 	ft->node.active = true;
-	down_write_ref_node(&fs_prio->node);
+	down_write_ref_node(&fs_prio->node, false);
 	tree_add_node(&ft->node, &fs_prio->node);
 	list_add_flow_table(ft, fs_prio);
 	fs_prio->num_ft++;
-	up_write_ref_node(&fs_prio->node);
+	up_write_ref_node(&fs_prio->node, false);
 	mutex_unlock(&root->chain_lock);
 	trace_mlx5_fs_add_ft(ft);
 	return ft;
@@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
 	if (ft->autogroup.active)
 		return ERR_PTR(-EPERM);
 
-	down_write_ref_node(&ft->node);
+	down_write_ref_node(&ft->node, false);
 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
 				     start_index, end_index,
 				     ft->node.children.prev);
-	up_write_ref_node(&ft->node);
+	up_write_ref_node(&ft->node, false);
 	if (IS_ERR(fg))
 		return fg;
 
 	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
 	if (err) {
-		tree_put_node(&fg->node);
+		tree_put_node(&fg->node, false);
 		return ERR_PTR(err);
 	}
 	trace_mlx5_fs_add_fg(fg);
@@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head)
 		struct match_list *iter, *match_tmp;
 
 		list_del(&head->first.list);
-		tree_put_node(&head->first.g->node);
+		tree_put_node(&head->first.g->node, false);
 		list_for_each_entry_safe(iter, match_tmp, &head->list,
 					 list) {
-			tree_put_node(&iter->g->node);
+			tree_put_node(&iter->g->node, false);
 			list_del(&iter->list);
 			kfree(iter);
 		}
@@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g,
 		fte_tmp = NULL;
 		goto out;
 	}
+	if (!fte_tmp->node.active) {
+		tree_put_node(&fte_tmp->node, false);
+		fte_tmp = NULL;
+		goto out;
+	}
 
 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
 out:
 	if (take_write)
-		up_write_ref_node(&g->node);
+		up_write_ref_node(&g->node, false);
 	else
 		up_read_ref_node(&g->node);
 	return fte_tmp;
@@ -1647,8 +1659,8 @@ search_again_locked:
 			continue;
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte_tmp);
-		up_write_ref_node(&fte_tmp->node);
-		tree_put_node(&fte_tmp->node);
+		up_write_ref_node(&fte_tmp->node, false);
+		tree_put_node(&fte_tmp->node, false);
 		kmem_cache_free(steering->ftes_cache, fte);
 		return rule;
 	}
@@ -1684,7 +1696,7 @@ skip_search:
 
 		err = insert_fte(g, fte);
 		if (err) {
-			up_write_ref_node(&g->node);
+			up_write_ref_node(&g->node, false);
 			if (err == -ENOSPC)
 				continue;
 			kmem_cache_free(steering->ftes_cache, fte);
@@ -1692,11 +1704,11 @@ skip_search:
 		}
 
 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-		up_write_ref_node(&g->node);
+		up_write_ref_node(&g->node, false);
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte);
-		up_write_ref_node(&fte->node);
-		tree_put_node(&fte->node);
+		up_write_ref_node(&fte->node, false);
+		tree_put_node(&fte->node, false);
 		return rule;
 	}
 	rule = ERR_PTR(-ENOENT);
@@ -1738,7 +1750,7 @@ search_again_locked:
 	err = build_match_list(&match_head, ft, spec);
 	if (err) {
 		if (take_write)
-			up_write_ref_node(&ft->node);
+			up_write_ref_node(&ft->node, false);
 		else
 			up_read_ref_node(&ft->node);
 		return ERR_PTR(err);
@@ -1753,7 +1765,7 @@ search_again_locked:
 	if (!IS_ERR(rule) ||
 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
 		if (take_write)
-			up_write_ref_node(&ft->node);
+			up_write_ref_node(&ft->node, false);
 		return rule;
 	}
 
@@ -1769,12 +1781,12 @@ search_again_locked:
 	g = alloc_auto_flow_group(ft, spec);
 	if (IS_ERR(g)) {
 		rule = ERR_CAST(g);
-		up_write_ref_node(&ft->node);
+		up_write_ref_node(&ft->node, false);
 		return rule;
 	}
 
 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-	up_write_ref_node(&ft->node);
+	up_write_ref_node(&ft->node, false);
 
 	err = create_auto_flow_group(ft, g);
 	if (err)
@@ -1793,17 +1805,17 @@ search_again_locked:
 	}
 
 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-	up_write_ref_node(&g->node);
+	up_write_ref_node(&g->node, false);
 	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
 			   dest_num, fte);
-	up_write_ref_node(&fte->node);
-	tree_put_node(&fte->node);
-	tree_put_node(&g->node);
+	up_write_ref_node(&fte->node, false);
+	tree_put_node(&fte->node, false);
+	tree_put_node(&g->node, false);
 	return rule;
 
 err_release_fg:
-	up_write_ref_node(&g->node);
-	tree_put_node(&g->node);
+	up_write_ref_node(&g->node, false);
+	tree_put_node(&g->node, false);
 	return ERR_PTR(err);
 }
 
@@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules);
 
 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 {
+	struct fs_fte *fte;
 	int i;
 
+	/* In order to consolidate the HW changes we lock the FTE for other
+	 * changes, and increase its refcount, in order not to perform the
+	 * "del" functions of the FTE. Will handle them here.
+	 * The removal of the rules is done under locked FTE.
+	 * After removing all the handle's rules, if there are remaining
+	 * rules, it means we just need to modify the FTE in FW, and
+	 * unlock/decrease the refcount we increased before.
+	 * Otherwise, it means the FTE should be deleted. First delete the
+	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
+	 * the FTE, which will handle the last decrease of the refcount, as
+	 * well as required handling of its parent.
+	 */
+	fs_get_obj(fte, handle->rule[0]->node.parent);
+	down_write_ref_node(&fte->node, false);
 	for (i = handle->num_rules - 1; i >= 0; i--)
-		tree_remove_node(&handle->rule[i]->node);
+		tree_remove_node(&handle->rule[i]->node, true);
+	if (fte->modify_mask && fte->dests_size) {
+		modify_fte(fte);
+		up_write_ref_node(&fte->node, false);
+	} else {
+		del_hw_fte(&fte->node);
+		up_write(&fte->node.lock);
+		tree_put_node(&fte->node, false);
+	}
 	kfree(handle);
 }
 EXPORT_SYMBOL(mlx5_del_flow_rules);
@@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
 		mutex_unlock(&root->chain_lock);
 		return err;
 	}
-	if (tree_remove_node(&ft->node))
+	if (tree_remove_node(&ft->node, false))
 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
 			       ft->id);
 	mutex_unlock(&root->chain_lock);
@@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table);
 
 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
 {
-	if (tree_remove_node(&fg->node))
+	if (tree_remove_node(&fg->node, false))
 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
 			       fg->id);
 }
@@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node)
 		tree_get_node(node);
 		list_for_each_entry_safe(iter, temp, &node->children, list)
 			clean_tree(iter);
-		tree_put_node(node);
-		tree_remove_node(node);
+		tree_put_node(node, false);
+		tree_remove_node(node, false);
 	}
 }
 
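
The fs_core changes above thread a locked flag through the node put/remove helpers so that a caller already holding the FTE write lock can drive deletions without re-acquiring it, and they consolidate the hardware rule-removal updates into a single modify_fte() call per FTE. The flag-threading shape in isolation, sketched with a hypothetical userspace lock wrapper rather than the driver's types:

	#include <pthread.h>

	struct node { pthread_rwlock_t lock; };

	/* Take the write lock only if the caller does not already hold it;
	 * both call sites must pass the same flag to the release helper. */
	static void node_write_lock(struct node *n, int locked)
	{
		if (!locked)
			pthread_rwlock_wrlock(&n->lock);
	}

	static void node_write_unlock(struct node *n, int locked)
	{
		if (!locked)
			pthread_rwlock_unlock(&n->lock);
	}
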
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -172,6 +172,7 @@ struct fs_fte {
 	enum fs_fte_status		status;
 	struct mlx5_fc			*counter;
 	struct rhash_head		hash;
+	int				modify_mask;
 };
 
 /* Type of children is mlx5_flow_table/namespace */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -595,27 +595,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 			 err);
 }
 
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
-{
-	struct mlx5_lag *ldev;
-	int n;
-
-	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev) {
-		mlx5_core_warn(dev, "no lag device, can't get pf num\n");
-		return -EINVAL;
-	}
-
-	for (n = 0; n < MLX5_MAX_PORTS; n++)
-		if (ldev->pf[n].dev == dev) {
-			*pf_num = n;
-			return 0;
-		}
-
-	mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
-	return -EINVAL;
-}
-
 /* Must be called with intf_mutex held */
 void mlx5_lag_remove(struct mlx5_core_dev *dev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -188,8 +188,6 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
 		MLX5_CAP_GEN(dev, lag_master);
 }
 
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
-
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
 void mlx5_lag_update(struct mlx5_core_dev *dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -111,7 +111,6 @@ struct mlxsw_thermal {
 	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
 	enum thermal_device_mode mode;
 	struct mlxsw_thermal_module *tz_module_arr;
-	unsigned int tz_module_num;
 };
 
 static inline u8 mlxsw_state_to_duty(int state)
@@ -711,6 +710,9 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
 
 	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 	module_tz = &thermal->tz_module_arr[module];
+	/* Skip if parent is already set (case of port split). */
+	if (module_tz->parent)
+		return 0;
 	module_tz->module = module;
 	module_tz->parent = thermal;
 	memcpy(module_tz->trips, default_thermal_trips,
@@ -718,13 +720,7 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
 	/* Initialize all trip point. */
 	mlxsw_thermal_module_trips_reset(module_tz);
 	/* Update trip point according to the module data. */
-	err = mlxsw_thermal_module_trips_update(dev, core, module_tz);
-	if (err)
-		return err;
-
-	thermal->tz_module_num++;
-
-	return 0;
+	return mlxsw_thermal_module_trips_update(dev, core, module_tz);
 }
 
 static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
@@ -732,6 +728,7 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
 	if (module_tz && module_tz->tzdev) {
 		mlxsw_thermal_module_tz_fini(module_tz->tzdev);
 		module_tz->tzdev = NULL;
+		module_tz->parent = NULL;
 	}
 }
 
@@ -740,6 +737,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
 			   struct mlxsw_thermal *thermal)
 {
 	unsigned int module_count = mlxsw_core_max_ports(core);
+	struct mlxsw_thermal_module *module_tz;
 	int i, err;
 
 	thermal->tz_module_arr = kcalloc(module_count,
@@ -754,8 +752,11 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
 			goto err_unreg_tz_module_arr;
 	}
 
-	for (i = 0; i < thermal->tz_module_num; i++) {
-		err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]);
+	for (i = 0; i < module_count - 1; i++) {
+		module_tz = &thermal->tz_module_arr[i];
+		if (!module_tz->parent)
+			continue;
+		err = mlxsw_thermal_module_tz_init(module_tz);
 		if (err)
 			goto err_unreg_tz_module_arr;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -34,6 +34,18 @@ struct mlxsw_m_port {
 	u8 module;
 };
 
+static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_m->base_mac);
+	return 0;
+}
+
 static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
 {
 	return 0;
@@ -314,6 +326,12 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_m->core = mlxsw_core;
 	mlxsw_m->bus_info = mlxsw_bus_info;
 
+	err = mlxsw_m_base_mac_get(mlxsw_m);
+	if (err) {
+		dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
+		return err;
+	}
+
 	err = mlxsw_m_ports_create(mlxsw_m);
 	if (err) {
 		dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
 
 	if (adapter->csr.flags &
 	    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
-		flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
-			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+		flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 			LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
 		/* map TX interrupt to vector */
 		int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 		lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
-		if (flags &
-		    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
-			int_vec_en_auto_clr |= INT_VEC_EN_(vector);
-			lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
-					  int_vec_en_auto_clr);
-		}
 
 		/* Remove TX interrupt from shared mask */
 		intr->vector_list[0].int_mask &= ~int_bit;
@@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
 	return ((++index) % rx->ring_size);
 }
 
-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
+{
+	int length = 0;
+
+	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+	return __netdev_alloc_skb(rx->adapter->netdev,
+				  length, GFP_ATOMIC | GFP_DMA);
+}
+
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+					struct sk_buff *skb)
 {
 	struct lan743x_rx_buffer_info *buffer_info;
 	struct lan743x_rx_descriptor *descriptor;
@@ -1911,9 +1914,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
-	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
 	descriptor = &rx->ring_cpu_ptr[index];
 	buffer_info = &rx->buffer_info[index];
-	buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
-					      length,
-					      GFP_ATOMIC | GFP_DMA);
+	buffer_info->skb = skb;
 	if (!(buffer_info->skb))
 		return -ENOMEM;
 	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
 	/* packet is available */
 	if (first_index == last_index) {
 		/* single buffer packet */
+		struct sk_buff *new_skb = NULL;
 		int packet_length;
 
+		new_skb = lan743x_rx_allocate_skb(rx);
+		if (!new_skb) {
+			/* failed to allocate next skb.
+			 * Memory is very low.
+			 * Drop this packet and reuse buffer.
+			 */
+			lan743x_rx_reuse_ring_element(rx, first_index);
+			goto process_extension;
+		}
+
 		buffer_info = &rx->buffer_info[first_index];
 		skb = buffer_info->skb;
 		descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
 		skb_put(skb, packet_length - 4);
 		skb->protocol = eth_type_trans(skb,
 					       rx->adapter->netdev);
-		lan743x_rx_allocate_ring_element(rx, first_index);
+		lan743x_rx_init_ring_element(rx, first_index, new_skb);
 	} else {
 		int index = first_index;
 
@@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
 		if (first_index <= last_index) {
 			while ((index >= first_index) &&
 			       (index <= last_index)) {
-				lan743x_rx_release_ring_element(rx,
-								index);
-				lan743x_rx_allocate_ring_element(rx,
-								 index);
+				lan743x_rx_reuse_ring_element(rx,
+							      index);
 				index = lan743x_rx_next_index(rx,
 							      index);
 			}
 		} else {
 			while ((index >= first_index) ||
 			       (index <= last_index)) {
-				lan743x_rx_release_ring_element(rx,
-								index);
-				lan743x_rx_allocate_ring_element(rx,
-								 index);
+				lan743x_rx_reuse_ring_element(rx,
+							      index);
 				index = lan743x_rx_next_index(rx,
 							      index);
 			}
 		}
 	}
 
process_extension:
 	if (extension_index >= 0) {
 		descriptor = &rx->ring_cpu_ptr[extension_index];
 		buffer_info = &rx->buffer_info[extension_index];
@@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
 
 	rx->last_head = 0;
 	for (index = 0; index < rx->ring_size; index++) {
-		ret = lan743x_rx_allocate_ring_element(rx, index);
+		struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
+
+		ret = lan743x_rx_init_ring_element(rx, index, new_skb);
 		if (ret)
 			goto cleanup;
 	}
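
The lan743x RX rework above follows a common NIC driver discipline: allocate the replacement receive buffer before the filled one is handed up the stack, and if that allocation fails, drop the packet and leave the old buffer in the ring, so no descriptor is ever left empty under memory pressure. A minimal userspace sketch of the idea, with a hypothetical ring type rather than the driver's actual API:

	#include <stdlib.h>

	struct ring_slot { void *buf; };

	/* Hand the filled buffer to the caller only if a replacement can
	 * be allocated first; otherwise drop the packet and keep the old
	 * buffer in the slot so the ring never loses a descriptor. */
	static void *take_filled_buffer(struct ring_slot *slot, size_t len)
	{
		void *new_buf = malloc(len);
		void *filled = slot->buf;

		if (!new_buf)
			return NULL;	/* drop; slot keeps its buffer */
		slot->buf = new_buf;	/* slot stays usable */
		return filled;		/* caller now owns the data */
	}
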
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3886,6 +3886,12 @@ static int ql3xxx_probe(struct pci_dev *pdev,
 	netif_stop_queue(ndev);
 
 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	if (!qdev->workqueue) {
+		unregister_netdev(ndev);
+		err = -ENOMEM;
+		goto err_out_iounmap;
+	}
+
 	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
 	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
 	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4681,6 +4681,11 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
 	 */
 	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
 						  ndev->name);
+	if (!qdev->workqueue) {
+		err = -ENOMEM;
+		goto err_out2;
+	}
+
 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3181,12 +3181,16 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
 	struct device_node *np = dev->of_node;
 	struct sh_eth_plat_data *pdata;
 	const char *mac_addr;
+	int ret;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return NULL;
 
-	pdata->phy_interface = of_get_phy_mode(np);
+	ret = of_get_phy_mode(np);
+	if (ret < 0)
+		return NULL;
+	pdata->phy_interface = ret;
 
 	mac_addr = of_get_mac_address(np);
 	if (mac_addr)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2805,6 +2805,11 @@ static int rocker_switchdev_event(struct notifier_block *unused,
 		memcpy(&switchdev_work->fdb_info, ptr,
 		       sizeof(switchdev_work->fdb_info));
 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+		if (unlikely(!switchdev_work->fdb_info.addr)) {
+			kfree(switchdev_work);
+			return NOTIFY_BAD;
+		}
+
 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
 				fdb_info->addr);
 		/* Take a reference on the rocker device */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1147,7 +1147,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	plat_dat->interface = of_get_phy_mode(dev->of_node);
+	ret = of_get_phy_mode(dev->of_node);
+	if (ret < 0)
+		return -EINVAL;
+	plat_dat->interface = ret;
 
 	/* platform data specifying hardware features and callbacks.
 	 * hardware features were copied from Allwinner drivers.
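
Both PHY-mode fixes (sh_eth and dwmac-sun8i above) exist because of_get_phy_mode() can return a negative errno, and assigning that straight into an interface field silently swallows the error. The defensive shape is to capture the result in a signed int, test it, then assign. In isolation, with a stand-in getter rather than the real OF helper:

	/* get_mode() stands in for any getter that returns either a valid
	 * enum value (>= 0) or a negative errno; never assign it unchecked
	 * into an unsigned destination or the error is lost. */
	static int get_mode(void) { return 3; }	/* stand-in */

	static int apply_mode(unsigned int *iface)
	{
		int ret = get_mode();

		if (ret < 0)
			return ret;	/* propagate the error */
		*iface = ret;
		return 0;
	}
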
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
@@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
 		pppox_unbind_sock(sk);
 	}
 	skb_queue_purge(&sk->sk_receive_queue);
+	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
 }
 
 static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
@@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
  *	@dtype: data type (verdict or numeric type defined by userspace)
  *	@objtype: object type (see NFT_OBJECT_* definitions)
  *	@size: maximum set size
+ *	@use: number of rules references to this set
  *	@nelems: number of elements
  *	@ndeact: number of deactivated elements queued for removal
  *	@timeout: default timeout value in jiffies
@@ -407,6 +408,7 @@ struct nft_set {
 	u32				dtype;
 	u32				objtype;
 	u32				size;
+	u32				use;
 	atomic_t			nelems;
 	u32				ndeact;
 	u64				timeout;
@@ -416,7 +418,8 @@ struct nft_set {
 	unsigned char			*udata;
 	/* runtime data below here */
 	const struct nft_set_ops	*ops ____cacheline_aligned;
-	u16				flags:14,
+	u16				flags:13,
+					bound:1,
 					genmask:2;
 	u8				klen;
 	u8				dlen;
@@ -466,6 +469,10 @@ struct nft_set_binding {
 	u32				flags;
 };
 
+enum nft_trans_phase;
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+			      struct nft_set_binding *binding,
+			      enum nft_trans_phase phase);
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 		       struct nft_set_binding *binding);
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -1344,15 +1351,12 @@ struct nft_trans_rule {
 struct nft_trans_set {
 	struct nft_set			*set;
 	u32				set_id;
-	bool				bound;
 };
 
 #define nft_trans_set(trans)	\
 	(((struct nft_trans_set *)trans->data)->set)
 #define nft_trans_set_id(trans)	\
 	(((struct nft_trans_set *)trans->data)->set_id)
-#define nft_trans_set_bound(trans)	\
-	(((struct nft_trans_set *)trans->data)->bound)
 
 struct nft_trans_chain {
 	bool				update;
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
@@ -2,8 +2,8 @@
 #ifndef __ASM_GENERIC_SOCKET_H
 #define __ASM_GENERIC_SOCKET_H
 
 #include <linux/posix_types.h>
 #include <asm/sockios.h>
 #include <asm/bitsperlong.h>
 
 /* For setsockopt(2) */
 #define SOL_SOCKET	1
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
@@ -1734,15 +1734,8 @@ EXPORT_SYMBOL(tcp_add_backlog);
 int tcp_filter(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = (struct tcphdr *)skb->data;
-	unsigned int eaten = skb->len;
-	int err;
 
-	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
-	if (!err) {
-		eaten -= skb->len;
-		TCP_SKB_CB(skb)->end_seq -= eaten;
-	}
-	return err;
+	return sk_filter_trim_cap(sk, skb, th->doff * 4);
 }
 EXPORT_SYMBOL(tcp_filter);
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
@@ -778,8 +778,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
 	pbw0 = tunnel->ip6rd.prefixlen >> 5;
 	pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
 
-	d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
-	    tunnel->ip6rd.relay_prefixlen;
+	d = tunnel->ip6rd.relay_prefixlen < 32 ?
+		(ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+		tunnel->ip6rd.relay_prefixlen : 0;
 
 	pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
 	if (pbi1 > 0)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
@@ -674,9 +674,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (flags & MSG_OOB)
 		goto out;
 
-	if (addr_len)
-		*addr_len = sizeof(*lsa);
-
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
@@ -706,6 +703,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		lsa->l2tp_conn_id = 0;
 		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
 			lsa->l2tp_scope_id = inet6_iif(skb);
+		*addr_len = sizeof(*lsa);
 	}
 
 	if (np->rxopt.all)
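
The l2tp_ip6 hunks above close an infoleak by setting *addr_len only on the branch that actually fills in the sockaddr, instead of unconditionally at entry where the structure may still be uninitialized. The same rule applies to any recvmsg-style handler: only report a length for bytes that were written. A hedged userspace illustration with a simplified signature, not the kernel's:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	/* Fill the caller's sockaddr and its length in the same branch,
	 * so uninitialized bytes are never reported as valid address data. */
	static void report_peer(struct sockaddr_in6 *out, int *addr_len,
				const struct in6_addr *peer, int have_peer)
	{
		if (!have_peer)
			return;		/* *addr_len left untouched */

		memset(out, 0, sizeof(*out));
		out->sin6_family = AF_INET6;
		out->sin6_addr = *peer;
		*addr_len = sizeof(*out);	/* set with the data */
	}
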
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
@@ -11,7 +11,8 @@
 #include <net/netfilter/ipv6/nf_nat_masquerade.h>
 
 static DEFINE_MUTEX(masq_mutex);
-static unsigned int masq_refcnt __read_mostly;
+static unsigned int masq_refcnt4 __read_mostly;
+static unsigned int masq_refcnt6 __read_mostly;
 
 unsigned int
 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -141,8 +142,13 @@ int nf_nat_masquerade_ipv4_register_notifier(void)
 	int ret = 0;
 
 	mutex_lock(&masq_mutex);
+	if (WARN_ON_ONCE(masq_refcnt4 == UINT_MAX)) {
+		ret = -EOVERFLOW;
+		goto out_unlock;
+	}
+
 	/* check if the notifier was already set */
-	if (++masq_refcnt > 1)
+	if (++masq_refcnt4 > 1)
 		goto out_unlock;
 
 	/* Register for device down reports */
@@ -160,7 +166,7 @@ int nf_nat_masquerade_ipv4_register_notifier(void)
 err_unregister:
	unregister_netdevice_notifier(&masq_dev_notifier);
 err_dec:
-	masq_refcnt--;
+	masq_refcnt4--;
 out_unlock:
 	mutex_unlock(&masq_mutex);
 	return ret;
@@ -171,7 +177,7 @@ void nf_nat_masquerade_ipv4_unregister_notifier(void)
 {
 	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (--masq_refcnt > 0)
+	if (--masq_refcnt4 > 0)
 		goto out_unlock;
 
 	unregister_netdevice_notifier(&masq_dev_notifier);
@@ -321,25 +327,23 @@ int nf_nat_masquerade_ipv6_register_notifier(void)
 	int ret = 0;
 
 	mutex_lock(&masq_mutex);
-	/* check if the notifier is already set */
-	if (++masq_refcnt > 1)
+	if (WARN_ON_ONCE(masq_refcnt6 == UINT_MAX)) {
+		ret = -EOVERFLOW;
 		goto out_unlock;
+	}
 
-	ret = register_netdevice_notifier(&masq_dev_notifier);
-	if (ret)
-		goto err_dec;
+	/* check if the notifier is already set */
+	if (++masq_refcnt6 > 1)
+		goto out_unlock;
 
 	ret = register_inet6addr_notifier(&masq_inet6_notifier);
 	if (ret)
-		goto err_unregister;
+		goto err_dec;
 
 	mutex_unlock(&masq_mutex);
 	return ret;
 
-err_unregister:
-	unregister_netdevice_notifier(&masq_dev_notifier);
 err_dec:
-	masq_refcnt--;
+	masq_refcnt6--;
 out_unlock:
 	mutex_unlock(&masq_mutex);
 	return ret;
@@ -350,11 +354,10 @@ void nf_nat_masquerade_ipv6_unregister_notifier(void)
 {
 	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (--masq_refcnt > 0)
+	if (--masq_refcnt6 > 0)
 		goto out_unlock;
 
 	unregister_inet6addr_notifier(&masq_inet6_notifier);
-	unregister_netdevice_notifier(&masq_dev_notifier);
 out_unlock:
 	mutex_unlock(&masq_mutex);
 }
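
The masquerade fix above splits one shared refcount into per-family counters so the IPv6 path no longer registers, and on teardown unregisters, the netdevice notifier that the IPv4 path owns. The underlying idiom, registering a shared resource only on the 0-to-1 transition of the counter that owns it, all under one lock, can be sketched as follows (hypothetical names, stub backend):

	#include <pthread.h>

	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int refcnt4;	/* one counter per user class */

	static int register_backend(void) { return 0; }	/* stand-in */
	static void unregister_backend(void) { }	/* stand-in */

	int masq4_register(void)
	{
		int ret = 0;

		pthread_mutex_lock(&reg_lock);
		if (++refcnt4 > 1)	/* already registered */
			goto out;
		ret = register_backend();
		if (ret)
			refcnt4--;	/* roll back on failure */
	out:
		pthread_mutex_unlock(&reg_lock);
		return ret;
	}

	void masq4_unregister(void)
	{
		pthread_mutex_lock(&reg_lock);
		if (--refcnt4 == 0)	/* last user gone */
			unregister_backend();
		pthread_mutex_unlock(&reg_lock);
	}
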
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
@@ -142,7 +142,7 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
 	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
 		if (trans->msg_type == NFT_MSG_NEWSET &&
 		    nft_trans_set(trans) == set) {
-			nft_trans_set_bound(trans) = true;
+			set->bound = true;
 			break;
 		}
 	}
@@ -2162,9 +2162,11 @@ err1:
 static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
 				   struct nft_expr *expr)
 {
+	const struct nft_expr_type *type = expr->ops->type;
+
 	if (expr->ops->destroy)
 		expr->ops->destroy(ctx, expr);
-	module_put(expr->ops->type->owner);
+	module_put(type->owner);
 }
 
 struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
@@ -3672,6 +3674,9 @@ err1:
 
 static void nft_set_destroy(struct nft_set *set)
 {
+	if (WARN_ON(set->use > 0))
+		return;
+
 	set->ops->destroy(set);
 	module_put(to_set_type(set->ops)->owner);
 	kfree(set->name);
@@ -3712,7 +3717,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 		NL_SET_BAD_ATTR(extack, attr);
 		return PTR_ERR(set);
 	}
-	if (!list_empty(&set->bindings) ||
+	if (set->use ||
 	    (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
 		NL_SET_BAD_ATTR(extack, attr);
 		return -EBUSY;
@@ -3742,6 +3747,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_set_binding *i;
 	struct nft_set_iter iter;
 
+	if (set->use == UINT_MAX)
+		return -EOVERFLOW;
+
 	if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
 		return -EBUSY;
 
@@ -3769,6 +3777,7 @@ bind:
 	binding->chain = ctx->chain;
 	list_add_tail_rcu(&binding->list, &set->bindings);
 	nft_set_trans_bind(ctx, set);
+	set->use++;
 
 	return 0;
 }
@@ -3788,6 +3797,25 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+			      struct nft_set_binding *binding,
+			      enum nft_trans_phase phase)
+{
+	switch (phase) {
+	case NFT_TRANS_PREPARE:
+		set->use--;
+		return;
+	case NFT_TRANS_ABORT:
+	case NFT_TRANS_RELEASE:
+		set->use--;
+		/* fall through */
+	default:
+		nf_tables_unbind_set(ctx, set, binding,
+				     phase == NFT_TRANS_COMMIT);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
+
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
@@ -6536,6 +6564,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 	struct nft_chain *chain;
 	struct nft_table *table;
 
+	if (list_empty(&net->nft.commit_list)) {
+		mutex_unlock(&net->nft.commit_mutex);
+		return 0;
+	}
+
 	/* 0. Validate ruleset, otherwise roll back for error reporting. */
 	if (nf_tables_validate(net) < 0)
 		return -EAGAIN;
@@ -6709,8 +6742,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 		nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
 		break;
 	case NFT_MSG_NEWSET:
-		if (!nft_trans_set_bound(trans))
-			nft_set_destroy(nft_trans_set(trans));
+		nft_set_destroy(nft_trans_set(trans));
 		break;
 	case NFT_MSG_NEWSETELEM:
 		nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6783,8 +6815,11 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
-			if (!nft_trans_set_bound(trans))
-				list_del_rcu(&nft_trans_set(trans)->list);
+			if (nft_trans_set(trans)->bound) {
+				nft_trans_destroy(trans);
+				break;
+			}
+			list_del_rcu(&nft_trans_set(trans)->list);
 			break;
 		case NFT_MSG_DELSET:
 			trans->ctx.table->use++;
@@ -6792,8 +6827,11 @@ static int __nf_tables_abort(struct net *net)
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
+			if (nft_trans_elem_set(trans)->bound) {
+				nft_trans_destroy(trans);
+				break;
+			}
 			te = (struct nft_trans_elem *)trans->data;
 
 			te->set->ops->remove(net, te->set, &te->elem);
 			atomic_dec(&te->set->nelems);
 			break;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
@@ -240,11 +240,15 @@ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
 {
 	struct nft_dynset *priv = nft_expr_priv(expr);
 
-	if (phase == NFT_TRANS_PREPARE)
-		return;
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding,
-			     phase == NFT_TRANS_COMMIT);
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_dynset *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
 }
 
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -292,6 +296,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
 	.eval		= nft_dynset_eval,
 	.init		= nft_dynset_init,
 	.destroy	= nft_dynset_destroy,
+	.activate	= nft_dynset_activate,
 	.deactivate	= nft_dynset_deactivate,
 	.dump		= nft_dynset_dump,
 };
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
@@ -127,11 +127,15 @@ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
 {
 	struct nft_lookup *priv = nft_expr_priv(expr);
 
-	if (phase == NFT_TRANS_PREPARE)
-		return;
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding,
-			     phase == NFT_TRANS_COMMIT);
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
 }
 
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -222,6 +226,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
 	.eval		= nft_lookup_eval,
 	.init		= nft_lookup_init,
+	.activate	= nft_lookup_activate,
 	.deactivate	= nft_lookup_deactivate,
 	.destroy	= nft_lookup_destroy,
 	.dump		= nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
@@ -162,11 +162,15 @@ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
 {
 	struct nft_objref_map *priv = nft_expr_priv(expr);
 
-	if (phase == NFT_TRANS_PREPARE)
-		return;
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding,
-			     phase == NFT_TRANS_COMMIT);
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+				    const struct nft_expr *expr)
+{
+	struct nft_objref_map *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
 }
 
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -183,6 +187,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
 	.eval		= nft_objref_map_eval,
 	.init		= nft_objref_map_init,
+	.activate	= nft_objref_map_activate,
 	.deactivate	= nft_objref_map_deactivate,
 	.destroy	= nft_objref_map_destroy,
 	.dump		= nft_objref_map_dump,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
@@ -1893,6 +1893,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+	int err = 0;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb)
@@ -1906,10 +1907,14 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 	}
 
 	if (unicast)
-		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+	else
+		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+				     n->nlmsg_flags & NLM_F_ECHO);
 
-	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
-			      n->nlmsg_flags & NLM_F_ECHO);
+	if (err > 0)
+		err = 0;
+	return err;
 }
 
 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
@@ -1941,12 +1946,15 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 	}
 
 	if (unicast)
-		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
-
-	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
-			     n->nlmsg_flags & NLM_F_ECHO);
+		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+	else
+		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+				     n->nlmsg_flags & NLM_F_ECHO);
 	if (err < 0)
 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
 
+	if (err > 0)
+		err = 0;
 	return err;
 }
 
@@ -2688,6 +2696,7 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
 	struct tcf_block *block = chain->block;
 	struct net *net = block->net;
 	struct sk_buff *skb;
+	int err = 0;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb)
@@ -2701,9 +2710,14 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
 	}
 
 	if (unicast)
-		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+	else
+		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+				     flags & NLM_F_ECHO);
 
-	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
+	if (err > 0)
+		err = 0;
+	return err;
 }
 
 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
@@ -1824,6 +1824,7 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+	int err = 0;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb)
@@ -1834,8 +1835,11 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
 		return -EINVAL;
 	}
 
-	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
-			      n->nlmsg_flags & NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+			     n->nlmsg_flags & NLM_F_ECHO);
+	if (err > 0)
+		err = 0;
+	return err;
 }
 
 static int tclass_del_notify(struct net *net,
@@ -1866,8 +1870,11 @@ static int tclass_del_notify(struct net *net,
 		return err;
 	}
 
-	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
-			      n->nlmsg_flags & NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+			     n->nlmsg_flags & NLM_F_ECHO);
+	if (err > 0)
+		err = 0;
+	return err;
 }
 
 #ifdef CONFIG_NET_CLS
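
Both sched files are fixed for the same reason: the netlink send helpers used here can return a positive byte count on success, while the *notify* helpers are expected to return 0 on success, so without the clamp a successful notification could bubble up as a bogus nonzero "error". The clamp idiom in isolation:

	/* Normalize a send helper that returns bytes sent (> 0), 0, or a
	 * negative errno into the 0-or-negative convention expected by
	 * callers that treat any nonzero result as failure. */
	static int notify_result(int send_ret)
	{
		return send_ret > 0 ? 0 : send_ret;
	}
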
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
@@ -558,9 +558,6 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 				       MSG_DONTWAIT | MSG_NOSIGNAL);
 		sk->sk_allocation = sk_allocation;
 	}
-
-	if (!rc)
-		ctx->sk_write_space(sk);
 }
 
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
@@ -146,7 +146,6 @@ retry:
 	}
 
 	ctx->in_tcp_sendpages = false;
-	ctx->sk_write_space(sk);
 
 	return 0;
 }
@@ -228,6 +227,8 @@ static void tls_write_space(struct sock *sk)
 	else
 #endif
 		tls_sw_write_space(sk, ctx);
+
+	ctx->sk_write_space(sk);
 }
 
 static void tls_ctx_free(struct tls_context *ctx)
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
@@ -825,6 +825,7 @@ out_put_neigh:
 		x25_neigh_put(x25->neighbour);
 		x25->neighbour = NULL;
 		read_unlock_bh(&x25_list_lock);
+		x25->state = X25_STATE_0;
 	}
 out_put_route:
 	x25_route_put(rt);