Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
David writes:
 "Networking

  1) Fix gro_cells leak in xfrm layer, from Li RongQing.

  2) BPF selftests change RLIMIT_MEMLOCK blindly, don't do that. From
     Eric Dumazet.

  3) AF_XDP calls synchronize_net() under RCU lock, fix from Björn
     Töpel.

  4) Out of bounds packet access in _decode_session6(), from Alexei
     Starovoitov.

  5) Several ethtool bugs, where we copy a struct into the kernel twice
     and our validations of the values in the first copy can be
     invalidated by the second copy due to asynchronous updates to the
     memory by the user. From Wenwen Wang.

  6) Missing netlink attribute validation in cls_api, from Davide
     Caratti.

  7) LLC SAP sockets need to be SOCK_RCU_FREE, from Cong Wang.

  8) rxrpc operates on wrong kvec, from Yue Haibing.

  9) A regression was introduced by the disassociation of route
     neighbour references in rt6_probe(), causing probes for
     neighbourless routes to not be properly rate limited. Fix from
     Sabrina Dubroca.

  10) Unsafe RCU locking in tipc, from Tung Nguyen.

  11) Use after free in inet6_mc_check(), from Eric Dumazet.

  12) PMTU from icmp packets should update the SCTP transport pathmtu,
      from Xin Long.

  13) Missing peer put on error in rxrpc, from David Howells.

  14) Fix pedit in nfp driver, from Pieter Jansen van Vuuren.

  15) Fix overflowing shift statement in qla3xxx driver, from Nathan
      Chancellor.

  16) Fix Spectre v1 in ptp code, from Gustavo A. R. Silva.

  17) udp6_unicast_rcv_skb() interprets udpv6_queue_rcv_skb() return
      value in an inverted manner, fix from Paolo Abeni.

  18) Fix missed unresolved entries in ipmr dumps, from Nikolay
      Aleksandrov.

  19) Fix NAPI handling under high load, we can completely miss events
      when NAPI has to loop more than one time in a cycle. From Heiner
      Kallweit."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (49 commits)
  ip6_tunnel: Fix encapsulation layout
  tipc: fix info leak from kernel tipc_event
  net: socket: fix a missing-check bug
  net: sched: Fix for duplicate class dump
  r8169: fix NAPI handling under high load
  net: ipmr: fix unresolved entry dumps
  net: mscc: ocelot: Fix comment in ocelot_vlant_wait_for_completion()
  sctp: fix the data size calculation in sctp_data_size
  virtio_net: avoid using netif_tx_disable() for serializing tx routine
  udp6: fix encap return code for resubmitting
  mlxsw: core: Fix use-after-free when flashing firmware during init
  sctp: not free the new asoc when sctp_wait_for_connect returns err
  sctp: fix race on sctp_id2asoc
  r8169: re-enable MSI-X on RTL8168g
  net: bpfilter: use get_pid_task instead of pid_task
  ptp: fix Spectre v1 vulnerability
  net: qla3xxx: Remove overflowing shift statement
  geneve, vxlan: Don't set exceptions if skb->len < mtu
  geneve, vxlan: Don't check skb_dst() twice
  sctp: get pr_assoc and pr_stream all status with SCTP_PR_SCTP_ALL instead
  ...
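Below is a minimal sketch of the double-fetch (TOCTOU) pattern item 5 describes, with hypothetical names (demo_op, demo_handle_op) rather than the real ethtool code: a field validated against the first copy_from_user() must be re-checked after any later copy, because userspace can rewrite the buffer between the two copies. The ethtool hunks further down re-check info.cmd and sub_command in exactly this way.

static int demo_handle_op(void __user *useraddr, u32 cmd)
{
        struct demo_op {
                u32 cmd;
                u32 data;
        } op;

        if (copy_from_user(&op, useraddr, sizeof(op)))
                return -EFAULT;

        /* Re-validate: the user buffer may have changed since the caller
         * inspected it, so dispatch only on the value copied here.
         */
        if (op.cmd != cmd)
                return -EINVAL;

        return 0;
}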
commit 91b15613ce
@@ -10122,7 +10122,6 @@ L: netdev@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S: Maintained
-F: net/core/flow.c
 F: net/xfrm/
 F: net/key/
 F: net/ipv4/xfrm*
@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
        phydev->advertising = phydev->supported;
 
        /* The internal PHY has its link interrupts routed to the
-        * Ethernet MAC ISRs
+        * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+        * that prevents the signaling of link UP interrupts when
+        * the link operates at 10Mbps, so fallback to polling for
+        * those versions of GENET.
         */
-       if (priv->internal_phy)
+       if (priv->internal_phy && !GENET_IS_V5(priv))
                dev->phydev->irq = PHY_IGNORE_INTERRUPT;
 
        return 0;
@@ -452,6 +452,10 @@ struct bufdesc_ex {
  * initialisation.
  */
 #define FEC_QUIRK_MIB_CLEAR (1 << 15)
+/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
+ * those FIFO receive registers are resolved in other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG (1 << 16)
 
 struct bufdesc_prop {
        int qid;
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
-               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+                              FEC_QUIRK_HAS_FRREG,
        }, {
                .name = "imx27-fec",
-               .driver_data = FEC_QUIRK_MIB_CLEAR,
+               .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
-                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+                               FEC_QUIRK_HAS_FRREG,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -2164,7 +2166,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
        memset(buf, 0, regs->len);
 
        for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
-               off = fec_enet_register_offset[i] / 4;
+               off = fec_enet_register_offset[i];
+
+               if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+                   !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+                       continue;
+
+               off >>= 2;
                buf[off] = readl(&theregs[off]);
        }
 }
@@ -432,10 +432,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
 
 static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
                                              struct mlx5_wq_cyc *wq,
-                                             u16 pi, u16 frag_pi)
+                                             u16 pi, u16 nnops)
 {
        struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-       u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
 
        edge_wi = wi + nnops;
 
@@ -454,15 +453,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
        u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
-       u16 pi, frag_pi;
+       u16 pi, contig_wqebbs_room;
        int err;
        int i;
 
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-       frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
-       if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
-               mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+               mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }
 
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
 
 static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
                                           struct mlx5_wq_cyc *wq,
-                                          u16 pi, u16 frag_pi)
+                                          u16 pi, u16 nnops)
 {
        struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-       u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
 
        edge_wi = wi + nnops;
 
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        struct mlx5e_tx_wqe_info *wi;
 
        struct mlx5e_sq_stats *stats = sq->stats;
+       u16 headlen, ihs, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
-       u16 headlen, ihs, frag_pi;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-       frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-       if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
-               mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
        }
 
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        struct mlx5e_tx_wqe_info *wi;
 
        struct mlx5e_sq_stats *stats = sq->stats;
-       u16 headlen, ihs, pi, frag_pi;
+       u16 headlen, ihs, pi, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-       frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-       if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
-               pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-               mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+       pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
+               pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }
 
-       mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+       mlx5i_sq_fetch_wqe(sq, &wqe, pi);
 
        /* fill wqe */
        wi = &sq->db.wqe_info[pi];
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
                case MLX5_PFAULT_SUBTYPE_WQE:
                        /* WQE based event */
                        pfault->type =
-                               be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+                               (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
                        pfault->token =
                                be32_to_cpu(pf_eqe->wqe.token);
                        pfault->wqe.wq_num =
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
                return ERR_PTR(res);
        }
 
-       /* Context will be freed by wait func after completion */
+       /* Context should be freed by the caller after completion. */
        return context;
 }
 
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
        cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
        cmd.flags = htonl(flags);
        context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
-       if (IS_ERR(context)) {
-               err = PTR_ERR(context);
-               goto out;
-       }
+       if (IS_ERR(context))
+               return PTR_ERR(context);
 
        err = mlx5_fpga_ipsec_cmd_wait(context);
        if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
        }
 
+out:
        kfree(context);
        return err;
 }
 
@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
 
 static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
                                      struct mlx5i_tx_wqe **wqe,
-                                     u16 *pi)
+                                     u16 pi)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
 
-       *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-       *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+       *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        memset(*wqe, 0, sizeof(**wqe));
 }
 
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
-       return wq->fbc.frag_sz_m1 + 1;
-}
-
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 {
        return wq->fbc.sz_m1 + 1;
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
        return ctr & wq->fbc.sz_m1;
 }
 
-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
-       return ctr & wq->fbc.frag_sz_m1;
-}
-
 static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
 {
        return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
        return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
 
+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+       return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
 static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 {
        int equal = (cc1 == cc2);
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 err_driver_init:
        mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
+       mlxsw_hwmon_fini(mlxsw_core->hwmon);
 err_hwmon_init:
        if (!reload)
                devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
        if (mlxsw_core->driver->fini)
                mlxsw_core->driver->fini(mlxsw_core);
        mlxsw_thermal_fini(mlxsw_core->thermal);
+       mlxsw_hwmon_fini(mlxsw_core->hwmon);
        if (!reload)
                devlink_unregister(devlink);
        mlxsw_emad_fini(mlxsw_core);
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
        return 0;
 }
 
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
 #endif
 
 struct mlxsw_thermal;
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
        struct device *hwmon_dev;
        int err;
 
-       mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
-                                  GFP_KERNEL);
+       mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
        if (!mlxsw_hwmon)
                return -ENOMEM;
        mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
        mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
        mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
 
-       hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
-                                                          "mlxsw",
-                                                          mlxsw_hwmon,
-                                                          mlxsw_hwmon->groups);
+       hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+                                                     "mlxsw", mlxsw_hwmon,
+                                                     mlxsw_hwmon->groups);
        if (IS_ERR(hwmon_dev)) {
                err = PTR_ERR(hwmon_dev);
                goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
 err_hwmon_register:
 err_fans_init:
 err_temp_init:
+       kfree(mlxsw_hwmon);
        return err;
 }
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+       hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+       kfree(mlxsw_hwmon);
+}
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
 {
        unsigned int val, timeout = 10;
 
-       /* Wait for the issued mac table command to be completed, or timeout.
-        * When the command read from ANA_TABLES_MACACCESS is
-        * MACACCESS_CMD_IDLE, the issued command completed successfully.
+       /* Wait for the issued vlan table command to be completed, or timeout.
+        * When the command read from ANA_TABLES_VLANACCESS is
+        * VLANACCESS_CMD_IDLE, the issued command completed successfully.
         */
        do {
                val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -429,12 +429,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
 
        switch (off) {
        case offsetof(struct iphdr, daddr):
-               set_ip_addr->ipv4_dst_mask = mask;
-               set_ip_addr->ipv4_dst = exact;
+               set_ip_addr->ipv4_dst_mask |= mask;
+               set_ip_addr->ipv4_dst &= ~mask;
+               set_ip_addr->ipv4_dst |= exact & mask;
                break;
        case offsetof(struct iphdr, saddr):
-               set_ip_addr->ipv4_src_mask = mask;
-               set_ip_addr->ipv4_src = exact;
+               set_ip_addr->ipv4_src_mask |= mask;
+               set_ip_addr->ipv4_src &= ~mask;
+               set_ip_addr->ipv4_src |= exact & mask;
                break;
        default:
                return -EOPNOTSUPP;
@@ -448,11 +450,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
 }
 
 static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
 {
-       ip6->ipv6[idx % 4].mask = mask;
-       ip6->ipv6[idx % 4].exact = exact;
+       ip6->ipv6[word].mask |= mask;
+       ip6->ipv6[word].exact &= ~mask;
+       ip6->ipv6[word].exact |= exact & mask;
 
        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
@@ -465,6 +468,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_src)
 {
        __be32 exact, mask;
+       u8 word;
 
        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~tcf_pedit_mask(action, idx);
|
|||
if (exact & ~mask)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (off < offsetof(struct ipv6hdr, saddr))
|
||||
if (off < offsetof(struct ipv6hdr, saddr)) {
|
||||
return -EOPNOTSUPP;
|
||||
else if (off < offsetof(struct ipv6hdr, daddr))
|
||||
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
|
||||
} else if (off < offsetof(struct ipv6hdr, daddr)) {
|
||||
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
|
||||
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
|
||||
exact, mask, ip_src);
|
||||
else if (off < offsetof(struct ipv6hdr, daddr) +
|
||||
sizeof(struct in6_addr))
|
||||
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
|
||||
} else if (off < offsetof(struct ipv6hdr, daddr) +
|
||||
sizeof(struct in6_addr)) {
|
||||
word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
|
||||
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
|
||||
exact, mask, ip_dst);
|
||||
else
|
||||
} else {
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -541,7 +548,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
        struct nfp_fl_set_eth set_eth;
        enum pedit_header_type htype;
        int idx, nkeys, err;
-       size_t act_size;
+       size_t act_size = 0;
        u32 offset, cmd;
        u8 ip_proto = 0;
 
@@ -599,7 +606,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                act_size = sizeof(set_eth);
                memcpy(nfp_action, &set_eth, act_size);
                *a_len += act_size;
-       } else if (set_ip_addr.head.len_lw) {
+       }
+       if (set_ip_addr.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_ip_addr);
                memcpy(nfp_action, &set_ip_addr, act_size);
                *a_len += act_size;
@@ -607,10 +616,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
-       } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+       }
+       if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
+               nfp_action += act_size;
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;
@@ -623,6 +634,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_dst.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_ip6_dst);
                memcpy(nfp_action, &set_ip6_dst, act_size);
                *a_len += act_size;
@@ -630,13 +642,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_src.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-       } else if (set_tport.head.len_lw) {
+       }
+       if (set_tport.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_tport);
                memcpy(nfp_action, &set_tport, act_size);
                *a_len += act_size;
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
                   attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
                   GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
                   (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
-                   QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+                   QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
                   GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
 
 out:
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-       ql_write_nvram_reg(qdev, spir,
-                          ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
 /*
@@ -6549,17 +6549,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
        struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
        struct net_device *dev = tp->dev;
        u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
-       int work_done= 0;
+       int work_done;
        u16 status;
 
        status = rtl_get_events(tp);
        rtl_ack_events(tp, status & ~tp->event_slow);
 
-       if (status & RTL_EVENT_NAPI_RX)
-               work_done = rtl_rx(dev, tp, (u32) budget);
+       work_done = rtl_rx(dev, tp, (u32) budget);
 
-       if (status & RTL_EVENT_NAPI_TX)
-               rtl_tx(dev, tp);
+       rtl_tx(dev, tp);
 
        if (status & tp->event_slow) {
                enable_mask &= ~tp->event_slow;
@@ -7093,20 +7091,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
        unsigned int flags;
 
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
                RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
                RTL_W8(tp, Cfg9346, Cfg9346_Lock);
                flags = PCI_IRQ_LEGACY;
-               break;
-       case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
-               /* This version was reported to have issues with resume
-                * from suspend when using MSI-X
-                */
-               flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
-               break;
-       default:
+       } else {
                flags = PCI_IRQ_ALL_TYPES;
        }
 
@@ -830,12 +830,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
-       if (skb_dst(skb)) {
-               int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
-                         info->options_len;
-
-               skb_dst_update_pmtu(skb, mtu);
-       }
+       skb_tunnel_check_pmtu(skb, &rt->dst,
+                             GENEVE_IPV4_HLEN + info->options_len);
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
@@ -876,11 +872,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(dst))
                return PTR_ERR(dst);
 
-       if (skb_dst(skb)) {
-               int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
-
-               skb_dst_update_pmtu(skb, mtu);
-       }
+       skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
@@ -2218,8 +2218,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
        /* Make sure no work handler is accessing the device */
        flush_work(&vi->config_work);
 
+       netif_tx_lock_bh(vi->dev);
        netif_device_detach(vi->dev);
-       netif_tx_disable(vi->dev);
+       netif_tx_unlock_bh(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
        if (netif_running(vi->dev)) {
@@ -2255,7 +2256,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
                }
        }
 
+       netif_tx_lock_bh(vi->dev);
        netif_device_attach(vi->dev);
+       netif_tx_unlock_bh(vi->dev);
        return err;
 }
 
@@ -2194,11 +2194,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        }
 
        ndst = &rt->dst;
-       if (skb_dst(skb)) {
-               int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-
-               skb_dst_update_pmtu(skb, mtu);
-       }
+       skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
 
        tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
        ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2231,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto out_unlock;
                }
 
-               if (skb_dst(skb)) {
-                       int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-
-                       skb_dst_update_pmtu(skb, mtu);
-               }
+               skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
@@ -24,6 +24,8 @@
 #include <linux/slab.h>
 #include <linux/timekeeping.h>
 
+#include <linux/nospec.h>
+
 #include "ptp_private.h"
 
 static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EINVAL;
                        break;
                }
+               pin_index = array_index_nospec(pin_index, ops->n_pins);
                if (mutex_lock_interruptible(&ptp->pincfg_mux))
                        return -ERESTARTSYS;
                pd = ops->pin_config[pin_index];
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EINVAL;
                        break;
                }
+               pin_index = array_index_nospec(pin_index, ops->n_pins);
                if (mutex_lock_interruptible(&ptp->pincfg_mux))
                        return -ERESTARTSYS;
                err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
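The two ptp_ioctl() hunks above insert array_index_nospec() between the bounds check and the array access. A minimal sketch of that Spectre v1 pattern, with hypothetical names (demo_read_entry, table, nr_entries):

#include <linux/nospec.h>

static int demo_read_entry(unsigned int idx, const int *table,
                           unsigned int nr_entries)
{
        if (idx >= nr_entries)
                return -EINVAL;
        /* Clamp the index so a mispredicted branch cannot speculatively
         * load table[idx] out of bounds.
         */
        idx = array_index_nospec(idx, nr_entries);
        return table[idx];
}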
@@ -1032,6 +1032,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
               ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
 }
 
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+       u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+       return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
+}
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
                dst->ops->update_pmtu(dst, NULL, skb, mtu);
 }
 
+static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
+                                        struct dst_entry *encap_dst,
+                                        int headroom)
+{
+       u32 encap_mtu = dst_mtu(encap_dst);
+
+       if (skb->len > encap_mtu - headroom)
+               skb_dst_update_pmtu(skb, encap_mtu - headroom);
+}
+
 #endif /* _NET_DST_H */
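A worked example of the helper added above, with hypothetical numbers: for an encapsulating route where dst_mtu(encap_dst) is 1500 and the headroom argument is 50 bytes, the effective tunnel MTU is 1450, so a 1600-byte skb triggers skb_dst_update_pmtu(skb, 1450) while a 1400-byte skb leaves the cached PMTU untouched. This is what lets the geneve and vxlan hunks above avoid setting PMTU exceptions when skb->len < mtu.

static void demo_tunnel_pmtu(struct sk_buff *skb, struct dst_entry *ndst)
{
        /* e.g. dst_mtu(ndst) == 1500, headroom == 50: only skbs longer
         * than 1450 bytes lower the inner path MTU to 1450.
         */
        skb_tunnel_check_pmtu(skb, ndst, 50);
}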
@@ -159,6 +159,10 @@ struct fib6_info {
        struct rt6_info * __percpu *rt6i_pcpu;
        struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
 
+#ifdef CONFIG_IPV6_ROUTER_PREF
+       unsigned long last_probe;
+#endif
+
        u32 fib6_metric;
        u8 fib6_protocol;
        u8 fib6_type;
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
        __u16 size;
 
        size = ntohs(chunk->chunk_hdr->length);
-       size -= sctp_datahdr_len(&chunk->asoc->stream);
+       size -= sctp_datachk_len(&chunk->asoc->stream);
 
        return size;
 }
@@ -876,6 +876,8 @@ struct sctp_transport {
        unsigned long sackdelay;
        __u32 sackfreq;
 
+       atomic_t mtu_info;
+
        /* When was the last time that we heard from this transport? We use
         * this to pick new active and retran paths.
         */
@@ -301,6 +301,7 @@ enum sctp_sinfo_flags {
        SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
        /* 2 bits here have been used by SCTP_PR_SCTP_MASK */
        SCTP_SENDALL = (1 << 6),
+       SCTP_PR_SCTP_ALL = (1 << 7),
        SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
        SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
 };
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
        sock_hold(sock->sk);
 
        old_xs = xchg(&m->xsk_map[i], xs);
-       if (old_xs) {
-               /* Make sure we've flushed everything. */
-               synchronize_net();
+       if (old_xs)
                sock_put((struct sock *)old_xs);
-       }
 
        sockfd_put(sock);
        return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
                return -EINVAL;
 
        old_xs = xchg(&m->xsk_map[k], NULL);
-       if (old_xs) {
-               /* Make sure we've flushed everything. */
-               synchronize_net();
+       if (old_xs)
                sock_put((struct sock *)old_xs);
-       }
 
        return 0;
 }
@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
 
        if (!info->pid)
                return;
-       tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
-       if (tsk)
+       tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+       if (tsk) {
                force_sig(SIGKILL, tsk);
+               put_task_struct(tsk);
+       }
        fput(info->pipe_to_umh);
        fput(info->pipe_from_umh);
        info->pid = 0;
@@ -1015,6 +1015,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
                return -EINVAL;
        }
 
+       if (info.cmd != cmd)
+               return -EINVAL;
+
        if (info.cmd == ETHTOOL_GRXCLSRLALL) {
                if (info.rule_cnt > 0) {
                        if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
@@ -2469,13 +2472,17 @@ roll_back:
        return ret;
 }
 
-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
+static int ethtool_set_per_queue(struct net_device *dev,
+                                void __user *useraddr, u32 sub_cmd)
 {
        struct ethtool_per_queue_op per_queue_opt;
 
        if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
                return -EFAULT;
 
+       if (per_queue_opt.sub_command != sub_cmd)
+               return -EINVAL;
+
        switch (per_queue_opt.sub_command) {
        case ETHTOOL_GCOALESCE:
                return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
@@ -2846,7 +2853,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
                rc = ethtool_get_phy_stats(dev, useraddr);
                break;
        case ETHTOOL_PERQUEUE:
-               rc = ethtool_set_per_queue(dev, useraddr);
+               rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
                break;
        case ETHTOOL_GLINKSETTINGS:
                rc = ethtool_get_link_ksettings(dev, useraddr);
@@ -296,8 +296,6 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
 next_entry:
                        e++;
                }
-               e = 0;
-               s_e = 0;
 
                spin_lock_bh(lock);
                list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
@@ -1184,11 +1184,6 @@ route_lookup:
        }
        skb_dst_set(skb, dst);
 
-       if (encap_limit >= 0) {
-               init_tel_txopt(&opt, encap_limit);
-               ipv6_push_frag_opts(skb, &opt.ops, &proto);
-       }
-
        if (hop_limit == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        hop_limit = ip_hdr(skb)->ttl;
@@ -1210,6 +1205,11 @@ route_lookup:
        if (err)
                return err;
 
+       if (encap_limit >= 0) {
+               init_tel_txopt(&opt, encap_limit);
+               ipv6_push_frag_opts(skb, &opt.ops, &proto);
+       }
+
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
 {
        int err;
 
-       /* callers have the socket lock and rtnl lock
-        * so no other readers or writers of iml or its sflist
-        */
+       write_lock_bh(&iml->sflock);
        if (!iml->sflist) {
                /* any-source empty exclude case */
-               return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+               err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+       } else {
+               err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
+                                    iml->sflist->sl_count, iml->sflist->sl_addr, 0);
+               sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
+               iml->sflist = NULL;
        }
-       err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
-               iml->sflist->sl_count, iml->sflist->sl_addr, 0);
-       sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
-       iml->sflist = NULL;
+       write_unlock_bh(&iml->sflock);
        return err;
 }
 
@@ -520,10 +520,11 @@ static void rt6_probe_deferred(struct work_struct *w)
 
 static void rt6_probe(struct fib6_info *rt)
 {
-       struct __rt6_probe_work *work;
+       struct __rt6_probe_work *work = NULL;
        const struct in6_addr *nh_gw;
        struct neighbour *neigh;
        struct net_device *dev;
+       struct inet6_dev *idev;
 
        /*
        * Okay, this does not seem to be appropriate
|
|||
nh_gw = &rt->fib6_nh.nh_gw;
|
||||
dev = rt->fib6_nh.nh_dev;
|
||||
rcu_read_lock_bh();
|
||||
idev = __in6_dev_get(dev);
|
||||
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
|
||||
if (neigh) {
|
||||
struct inet6_dev *idev;
|
||||
|
||||
if (neigh->nud_state & NUD_VALID)
|
||||
goto out;
|
||||
|
||||
idev = __in6_dev_get(dev);
|
||||
work = NULL;
|
||||
write_lock(&neigh->lock);
|
||||
if (!(neigh->nud_state & NUD_VALID) &&
|
||||
time_after(jiffies,
|
||||
|
@ -557,11 +555,13 @@ static void rt6_probe(struct fib6_info *rt)
|
|||
__neigh_set_probe_once(neigh);
|
||||
}
|
||||
write_unlock(&neigh->lock);
|
||||
} else {
|
||||
} else if (time_after(jiffies, rt->last_probe +
|
||||
idev->cnf.rtr_probe_interval)) {
|
||||
work = kmalloc(sizeof(*work), GFP_ATOMIC);
|
||||
}
|
||||
|
||||
if (work) {
|
||||
rt->last_probe = jiffies;
|
||||
INIT_WORK(&work->work, rt6_probe_deferred);
|
||||
work->target = *nh_gw;
|
||||
dev_hold(dev);
|
||||
|
|
|
@@ -766,11 +766,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 
        ret = udpv6_queue_rcv_skb(sk, skb);
 
-       /* a return value > 0 means to resubmit the input, but
-        * it wants the return to be -protocol, or 0
-        */
+       /* a return value > 0 means to resubmit the input */
        if (ret > 0)
-               return -ret;
+               return ret;
        return 0;
 }
 
@@ -146,8 +146,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
        fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
        fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
 
-       while (nh + offset + 1 < skb->data ||
-              pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+       while (nh + offset + sizeof(*exthdr) < skb->data ||
+              pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
                nh = skb_network_header(skb);
                exthdr = (struct ipv6_opt_hdr *)(nh + offset);
 
@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
        llc_sk(sk)->sap = sap;
 
        spin_lock_bh(&sap->sk_lock);
+       sock_set_flag(sk, SOCK_RCU_FREE);
        sap->sk_count++;
        sk_nulls_add_node_rcu(sk, laddr_hb);
        hlist_add_head(&llc->dev_hash_node, dev_hb);
@@ -337,7 +337,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_connection *conn;
-       struct rxrpc_peer *peer;
+       struct rxrpc_peer *peer = NULL;
        struct rxrpc_call *call;
 
        _enter("");
@@ -139,7 +139,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
        udp_sk(usk)->gro_complete = NULL;
 
        udp_encap_enable();
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
        if (local->srx.transport.family == AF_INET6)
                udpv6_encap_enable();
 #endif
@@ -572,7 +572,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
                        whdr.flags ^= RXRPC_CLIENT_INITIATED;
                        whdr.flags &= RXRPC_CLIENT_INITIATED;
 
-                       ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
+                       ret = kernel_sendmsg(local->socket, &msg,
+                                            iov, ioc, size);
                        if (ret < 0)
                                trace_rxrpc_tx_fail(local->debug_id, 0, ret,
                                                    rxrpc_tx_point_reject);
@@ -195,6 +195,7 @@ void rxrpc_error_report(struct sock *sk)
        rxrpc_store_error(peer, serr);
        rcu_read_unlock();
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       rxrpc_put_peer(peer);
 
        _leave("");
 }
@@ -31,6 +31,8 @@
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
 
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
 /* The list of all installed classifier types */
 static LIST_HEAD(tcf_proto_base);
 
@@ -1211,7 +1213,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 replay:
        tp_created = 0;
 
-       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
        if (err < 0)
                return err;
 
@@ -1360,7 +1362,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
        if (err < 0)
                return err;
 
@@ -1475,7 +1477,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
        void *fh = NULL;
        int err;
 
-       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
        if (err < 0)
                return err;
 
@@ -1838,7 +1840,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
                return -EPERM;
 
 replay:
-       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
        if (err < 0)
                return err;
 
@@ -1949,7 +1951,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                return skb->len;
 
-       err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+       err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         NULL);
        if (err)
                return err;
 
@@ -1307,10 +1307,6 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
        return 0;
 }
 
-/*
- * Delete/get qdisc.
- */
-
 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
        [TCA_KIND] = { .type = NLA_STRING },
        [TCA_OPTIONS] = { .type = NLA_NESTED },
@@ -1323,6 +1319,10 @@ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
        [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
 };
 
+/*
+ * Delete/get qdisc.
+ */
+
 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
 {
|
|||
|
||||
if (tcm->tcm_parent) {
|
||||
q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
|
||||
if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
|
||||
if (q && q != root &&
|
||||
tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
        /* Get the lowest pmtu of all the transports. */
        list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
                if (t->pmtu_pending && t->dst) {
-                       sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+                       sctp_transport_update_pmtu(t,
+                                                  atomic_read(&t->mtu_info));
                        t->pmtu_pending = 0;
                }
                if (!pmtu || (t->pathmtu < pmtu))
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                return;
 
        if (sock_owned_by_user(sk)) {
+               atomic_set(&t->mtu_info, pmtu);
                asoc->pmtu_pending = 1;
                t->pmtu_pending = 1;
                return;
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                sctp_assoc_sync_pmtu(asoc);
        }
 
+       if (asoc->pmtu_pending) {
+               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                       sctp_assoc_sync_pmtu(asoc);
+               asoc->pmtu_pending = 0;
+       }
+
        /* If there a is a prepend chunk stick it on the list before
         * any other chunks get appended.
        */
@@ -271,11 +271,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
 
        spin_lock_bh(&sctp_assocs_id_lock);
        asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+       if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+               asoc = NULL;
        spin_unlock_bh(&sctp_assocs_id_lock);
 
-       if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
-               return NULL;
-
        return asoc;
 }
 
@@ -1946,8 +1945,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
        if (sp->strm_interleave) {
                timeo = sock_sndtimeo(sk, 0);
                err = sctp_wait_for_connect(asoc, &timeo);
-               if (err)
+               if (err) {
+                       err = -ESRCH;
                        goto err;
+               }
        } else {
                wait_connect = true;
        }
@@ -7100,14 +7101,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
        }
 
        policy = params.sprstat_policy;
-       if (policy & ~SCTP_PR_SCTP_MASK)
+       if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
                goto out;
 
        asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
        if (!asoc)
                goto out;
 
-       if (policy == SCTP_PR_SCTP_NONE) {
+       if (policy & SCTP_PR_SCTP_ALL) {
                params.sprstat_abandoned_unsent = 0;
                params.sprstat_abandoned_sent = 0;
                for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7159,7 +7160,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
        }
 
        policy = params.sprstat_policy;
-       if (policy & ~SCTP_PR_SCTP_MASK)
+       if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
                goto out;
 
        asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
@@ -7175,7 +7176,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
                goto out;
        }
 
-       if (policy == SCTP_PR_SCTP_NONE) {
+       if (policy == SCTP_PR_SCTP_ALL) {
                params.sprstat_abandoned_unsent = 0;
                params.sprstat_abandoned_sent = 0;
                for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
net/socket.c
@@ -2875,9 +2875,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
            copy_in_user(&rxnfc->fs.ring_cookie,
                         &compat_rxnfc->fs.ring_cookie,
                         (void __user *)(&rxnfc->fs.location + 1) -
-                        (void __user *)&rxnfc->fs.ring_cookie) ||
-           copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
-                        sizeof(rxnfc->rule_cnt)))
+                        (void __user *)&rxnfc->fs.ring_cookie))
                return -EFAULT;
+       if (ethcmd == ETHTOOL_GRXCLSRLALL) {
+               if (put_user(rule_cnt, &rxnfc->rule_cnt))
+                       return -EFAULT;
+       } else if (copy_in_user(&rxnfc->rule_cnt,
+                               &compat_rxnfc->rule_cnt,
+                               sizeof(rxnfc->rule_cnt)))
+               return -EFAULT;
 }
 
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
        struct sk_buff *skb;
        struct tipc_msg *hdr;
 
+       memset(&evt, 0, sizeof(evt));
        evt.event = event;
        evt.found_lower = m->instance;
        evt.found_upper = m->instance;
@@ -1041,6 +1041,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
        if (r->last_retransm != buf_seqno(skb)) {
                r->last_retransm = buf_seqno(skb);
                r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+               r->stale_cnt = 0;
        } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                link_retransmit_failure(l, skb);
                if (link_is_bc_sndlink(l))
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
        struct sk_buff *buf;
        struct distr_item *item;
 
-       list_del(&publ->binding_node);
+       list_del_rcu(&publ->binding_node);
 
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
                        ITEM_SIZE) * ITEM_SIZE;
        u32 msg_rem = msg_dsz;
 
-       list_for_each_entry(publ, pls, binding_node) {
+       list_for_each_entry_rcu(publ, pls, binding_node) {
                /* Prepare next buffer: */
                if (!skb) {
                        skb = named_prepare_buf(net, PUBLICATION, msg_rem,
@@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);
 
+       sock_set_flag(sk, SOCK_RCU_FREE);
+
        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->tx_completion_lock);
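The xskmap hunks earlier remove synchronize_net() — which may sleep and could be reached under rcu_read_lock() — and the hunk above marks the socket SOCK_RCU_FREE instead, so its memory is reclaimed only after an RCU grace period. A generic sketch of that deferral pattern, using a hypothetical struct demo_obj:

struct demo_obj {
        struct rcu_head rcu;
        int payload;
};

static void demo_release(struct demo_obj *obj)
{
        /* Defer the free past the grace period instead of blocking;
         * safe from contexts that cannot sleep.
         */
        kfree_rcu(obj, rcu);
}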
@@ -116,6 +116,9 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
 
 static void xfrmi_dev_free(struct net_device *dev)
 {
+       struct xfrm_if *xi = netdev_priv(dev);
+
+       gro_cells_destroy(&xi->gro_cells);
        free_percpu(dev->tstats);
 }
 
@@ -632,9 +632,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
                        break;
                }
                if (newpos)
-                       hlist_add_behind(&policy->bydst, newpos);
+                       hlist_add_behind_rcu(&policy->bydst, newpos);
                else
-                       hlist_add_head(&policy->bydst, chain);
+                       hlist_add_head_rcu(&policy->bydst, chain);
        }
 
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -774,9 +774,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
                        break;
                }
        if (newpos)
-               hlist_add_behind(&policy->bydst, newpos);
+               hlist_add_behind_rcu(&policy->bydst, newpos);
        else
-               hlist_add_head(&policy->bydst, chain);
+               hlist_add_head_rcu(&policy->bydst, chain);
        __xfrm_policy_link(policy, dir);
 
        /* After previous checking, family can either be AF_INET or AF_INET6 */
@@ -437,14 +437,19 @@ void enable_fastopen(void)
        }
 }
 
-static struct rlimit rlim_old, rlim_new;
+static struct rlimit rlim_old;
 
 static __attribute__((constructor)) void main_ctor(void)
 {
        getrlimit(RLIMIT_MEMLOCK, &rlim_old);
-       rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
-       rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
-       setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+
+       if (rlim_old.rlim_cur != RLIM_INFINITY) {
+               struct rlimit rlim_new;
+
+               rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+               rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+               setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+       }
 }
 
 static __attribute__((destructor)) void main_dtor(void)