Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix state pruning in bpf verifier wrt. alignment, from Daniel
    Borkmann.

 2) Handle non-linear SKBs properly in SCTP ICMP parsing, from Davide
    Caratti.

 3) Fix bit field definitions for rss_hash_type of descriptors in mlx5
    driver, from Jesper Brouer.

 4) Defer slave->link updates until bonding is ready to do a full commit
    to the new settings, from Nithin Sujir.

 5) Properly reference count ipv4 FIB metrics to avoid use after free
    situations, from Eric Dumazet and several others including Cong Wang
    and Julian Anastasov.

 6) Fix races in llc_ui_bind(), from Lin Zhang.

 7) Fix regression of ESP UDP encapsulation for TCP packets, from
    Steffen Klassert.

 8) Fix mdio-octeon driver Kconfig deps, from Randy Dunlap.

 9) Fix regression in setting DSCP on ipv6/GRE encapsulation, from Peter
    Dawson.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
  ipv4: add reference counting to metrics
  net: ethernet: ax88796: don't call free_irq without request_irq first
  ip6_tunnel, ip6_gre: fix setting of DSCP on encapsulated packets
  sctp: fix ICMP processing if skb is non-linear
  net: llc: add lock_sock in llc_ui_bind to avoid a race condition
  bonding: Don't update slave->link until ready to commit
  test_bpf: Add a couple of tests for BPF_JSGE.
  bpf: add various verifier test cases
  bpf: fix wrong exposure of map_flags into fdinfo for lpm
  bpf: add bpf_clone_redirect to bpf_helper_changes_pkt_data
  bpf: properly reset caller saved regs after helper call and ld_abs/ind
  bpf: fix incorrect pruning decision when alignment must be tracked
  arp: fixed -Wuninitialized compiler warning
  tcp: avoid fastopen API to be used on AF_UNSPEC
  net: move somaxconn init from sysctl code
  net: fix potential null pointer dereference
  geneve: fix fill_info when using collect_metadata
  virtio-net: enable TSO/checksum offloads for Q-in-Q vlans
  be2net: Fix offload features for Q-in-Q packets
  vlan: Fix tcp checksum offloads in Q-in-Q vlans
  ...
Committed by Linus Torvalds, 2017-05-26 13:51:01 -07:00, in commit 6741d51699.
57 changed files with 702 additions and 255 deletions.


@@ -15,6 +15,10 @@ Optional properties:
 - phy-reset-active-high : If present then the reset sequence using the GPIO
   specified in the "phy-reset-gpios" property is reversed (H=reset state,
   L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.


@@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
     bond_for_each_slave_rcu(bond, slave, iter) {
         unsigned long trans_start = dev_trans_start(slave->dev);
 
+        slave->new_link = BOND_LINK_NOCHANGE;
+
         if (slave->link != BOND_LINK_UP) {
             if (bond_time_in_interval(bond, trans_start, 1) &&
                 bond_time_in_interval(bond, slave->last_rx, 1)) {
-                slave->link = BOND_LINK_UP;
+                slave->new_link = BOND_LINK_UP;
                 slave_state_changed = 1;
 
                 /* primary_slave has no meaning in round-robin
@@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
             if (!bond_time_in_interval(bond, trans_start, 2) ||
                 !bond_time_in_interval(bond, slave->last_rx, 2)) {
-                slave->link = BOND_LINK_DOWN;
+                slave->new_link = BOND_LINK_DOWN;
                 slave_state_changed = 1;
 
                 if (slave->link_failure_count < UINT_MAX)
@@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
         if (!rtnl_trylock())
             goto re_arm;
 
+        bond_for_each_slave(bond, slave, iter) {
+            if (slave->new_link != BOND_LINK_NOCHANGE)
+                slave->link = slave->new_link;
+        }
+
         if (slave_state_changed) {
             bond_slave_state_change(bond);
             if (BOND_MODE(bond) == BOND_MODE_XOR)


@@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev)
 
     ret = ax_mii_init(dev);
     if (ret)
-        goto out_irq;
+        goto err_out;
 
     ax_NS8390_init(dev, 0);
 
     ret = register_netdev(dev);
     if (ret)
-        goto out_irq;
+        goto err_out;
 
     netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
                 ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
@@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev)
 
     return 0;
 
-out_irq:
-    /* cleanup irq */
-    free_irq(dev->irq, dev);
 err_out:
     return ret;
 }


@@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
     struct be_adapter *adapter = netdev_priv(dev);
     u8 l4_hdr = 0;
 
-    /* The code below restricts offload features for some tunneled packets.
+    /* The code below restricts offload features for some tunneled and
+     * Q-in-Q packets.
      * Offload features for normal (non tunnel) packets are unchanged.
      */
+    features = vlan_features_check(skb, features);
     if (!skb->encapsulation ||
         !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
         return features;


@@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev)
 {
     int err, phy_reset;
     bool active_high = false;
-    int msec = 1;
+    int msec = 1, phy_post_delay = 0;
     struct device_node *np = pdev->dev.of_node;
 
     if (!np)
@@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev)
     else if (!gpio_is_valid(phy_reset))
         return 0;
 
+    err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
+    /* valid reset duration should be less than 1s */
+    if (!err && phy_post_delay > 1000)
+        return -EINVAL;
+
     active_high = of_property_read_bool(np, "phy-reset-active-high");
 
     err = devm_gpio_request_one(&pdev->dev, phy_reset,
@@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev)
 
     gpio_set_value_cansleep(phy_reset, !active_high);
 
+    if (!phy_post_delay)
+        return 0;
+
+    if (phy_post_delay > 20)
+        msleep(phy_post_delay);
+    else
+        usleep_range(phy_post_delay * 1000,
+                     phy_post_delay * 1000 + 1000);
+
     return 0;
 }
 
 #else /* CONFIG_OF */


@@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work)
     mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                    mlx5_command_str(msg_to_opcode(ent->in)),
                    msg_to_opcode(ent->in));
-    mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+    mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
 static void cmd_work_handler(struct work_struct *work)
@@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work)
     }
 
     cmd->ent_arr[ent->idx] = ent;
+    set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
     lay = get_inst(cmd, ent->idx);
     ent->lay = lay;
     memset(lay, 0, sizeof(*lay));
@@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work)
     if (ent->callback)
         schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 
+    /* Skip sending command to fw if internal error */
+    if (pci_channel_offline(dev->pdev) ||
+        dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+        u8 status = 0;
+        u32 drv_synd;
+
+        ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
+        MLX5_SET(mbox_out, ent->out, status, status);
+        MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
+
+        mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+        return;
+    }
+
     /* ring doorbell after the descriptor is valid */
     mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
     wmb();
@@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
         poll_timeout(ent);
         /* make sure we read the descriptor after ownership is SW */
         rmb();
-        mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+        mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
     }
 }
 
@@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
         wait_for_completion(&ent->done);
     } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
         ent->ret = -ETIMEDOUT;
-        mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+        mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
     }
 
     err = ent->ret;
@@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
     }
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
     struct mlx5_cmd *cmd = &dev->cmd;
     struct mlx5_cmd_work_ent *ent;
@@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
             struct semaphore *sem;
 
             ent = cmd->ent_arr[i];
+
+            /* if we already completed the command, ignore it */
+            if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
+                                    &ent->state)) {
+                /* only real completion can free the cmd slot */
+                if (!forced) {
+                    mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+                                  ent->idx);
+                    free_ent(cmd, ent->idx);
+                }
+                continue;
+            }
+
             if (ent->callback)
                 cancel_delayed_work(&ent->cb_timeout_work);
             if (ent->page_queue)
@@ -1417,6 +1445,9 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
             mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                           ent->ret, deliv_status_to_str(ent->status), ent->status);
         }
+
+        /* only real completion will free the entry slot */
+        if (!forced)
             free_ent(cmd, ent->idx);
 
         if (ent->callback) {


@@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 #define MLX5_IB_GRH_BYTES     40
 #define MLX5_IPOIB_ENCAP_LEN  4
 #define MLX5_GID_SIZE         16
+#define MLX5_IPOIB_PSEUDO_LEN 20
+#define MLX5_IPOIB_HARD_LEN   (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
 
 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                          struct mlx5_cqe64 *cqe,
@@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                          struct sk_buff *skb)
 {
     struct net_device *netdev = rq->netdev;
+    char *pseudo_header;
     u8 *dgid;
     u8 g;
 
@@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
     if (likely(netdev->features & NETIF_F_RXHASH))
         mlx5e_skb_set_hash(cqe, skb);
 
+    /* 20 bytes of ipoib header and 4 for encap existing */
+    pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
+    memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
     skb_reset_mac_header(skb);
-    skb_pull(skb, MLX5_IPOIB_ENCAP_LEN);
+    skb_pull(skb, MLX5_IPOIB_HARD_LEN);
 
     skb->dev = netdev;


@@ -43,6 +43,7 @@
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_csum.h>
 #include <net/vxlan.h>
 #include <net/arp.h>
 #include "en.h"
@@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
     if (e->flags & MLX5_ENCAP_ENTRY_VALID)
         mlx5_encap_dealloc(priv->mdev, e->encap_id);
 
-    hlist_del_rcu(&e->encap_hlist);
+    hash_del_rcu(&e->encap_hlist);
     kfree(e->encap_header);
     kfree(e);
 }
@@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
 {
     struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
-    int i, action_size, nactions, max_actions, first, last;
+    int i, action_size, nactions, max_actions, first, last, first_z;
     void *s_masks_p, *a_masks_p, *vals_p;
-    u32 s_mask, a_mask, val;
     struct mlx5_fields *f;
     u8 cmd, field_bsize;
+    u32 s_mask, a_mask;
     unsigned long mask;
     void *action;
@@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
     for (i = 0; i < ARRAY_SIZE(fields); i++) {
         f = &fields[i];
         /* avoid seeing bits set from previous iterations */
-        s_mask = a_mask = mask = val = 0;
+        s_mask = 0;
+        a_mask = 0;
 
         s_masks_p = (void *)set_masks + f->offset;
         a_masks_p = (void *)add_masks + f->offset;
@@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks,
             memset(a_masks_p, 0, f->size);
         }
 
-        memcpy(&val, vals_p, f->size);
-
         field_bsize = f->size * BITS_PER_BYTE;
 
+        first_z = find_first_zero_bit(&mask, field_bsize);
         first = find_first_bit(&mask, field_bsize);
         last = find_last_bit(&mask, field_bsize);
-        if (first > 0 || last != (field_bsize - 1)) {
+        if (first > 0 || last != (field_bsize - 1) || first_z < last) {
             printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
                    mask);
             return -EOPNOTSUPP;
@@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
         }
 
         if (field_bsize == 32)
-            MLX5_SET(set_action_in, action, data, ntohl(val));
+            MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
         else if (field_bsize == 16)
-            MLX5_SET(set_action_in, action, data, ntohs(val));
+            MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
         else if (field_bsize == 8)
-            MLX5_SET(set_action_in, action, data, val);
+            MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
 
         action += action_size;
         nactions++;
@@ -1109,6 +1111,28 @@ out_err:
     return err;
 }
 
+static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+{
+    u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+                     TCA_CSUM_UPDATE_FLAG_UDP;
+
+    /* The HW recalcs checksums only if re-writing headers */
+    if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+        netdev_warn(priv->netdev,
+                    "TC csum action is only offloaded with pedit\n");
+        return false;
+    }
+
+    if (update_flags & ~prot_flags) {
+        netdev_warn(priv->netdev,
+                    "can't offload TC csum action for some header/s - flags %#x\n",
+                    update_flags);
+        return false;
+    }
+
+    return true;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
                                 struct mlx5e_tc_flow *flow)
@@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
             continue;
         }
 
+        if (is_tcf_csum(a)) {
+            if (csum_offload_supported(priv, attr->action,
+                                       tcf_csum_update_flags(a)))
+                continue;
+
+            return -EOPNOTSUPP;
+        }
+
         if (is_tcf_skbedit_mark(a)) {
             u32 mark = tcf_skbedit_mark(a);
 
@@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
             continue;
         }
 
+        if (is_tcf_csum(a)) {
+            if (csum_offload_supported(priv, attr->action,
+                                       tcf_csum_update_flags(a)))
+                continue;
+
+            return -EOPNOTSUPP;
+        }
+
         if (is_tcf_mirred_egress_redirect(a)) {
             int ifindex = tcf_mirred_ifindex(a);
             struct net_device *out_dev, *encap_dev = NULL;


@@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
             break;
 
         case MLX5_EVENT_TYPE_CMD:
-            mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+            mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
             break;
 
         case MLX5_EVENT_TYPE_PORT_CHANGE:


@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
     spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 
     mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-    mlx5_cmd_comp_handler(dev, vector);
+    mlx5_cmd_comp_handler(dev, vector, true);
     return;
 
 no_trig:


@@ -612,7 +612,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
     struct mlx5_priv *priv = &mdev->priv;
     struct msix_entry *msix = priv->msix_arr;
     int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-    int err;
 
     if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
         mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -622,18 +621,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
     cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                     priv->irq_info[i].mask);
 
-    err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
-    if (err) {
-        mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
-                       irq);
-        goto err_clear_mask;
-    }
+#ifdef CONFIG_SMP
+    if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+        mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+#endif
 
     return 0;
-
-err_clear_mask:
-    free_cpumask_var(priv->irq_info[i].mask);
-    return err;
 }
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)


@@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
     if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
         goto nla_put_failure;
 
-    if (ip_tunnel_info_af(info) == AF_INET) {
+    if (rtnl_dereference(geneve->sock4)) {
         if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
                             info->key.u.ipv4.dst))
             goto nla_put_failure;
@@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        !!(info->key.tun_flags & TUNNEL_CSUM)))
             goto nla_put_failure;
 
+    }
+
 #if IS_ENABLED(CONFIG_IPV6)
-    } else {
+    if (rtnl_dereference(geneve->sock6)) {
         if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
                              &info->key.u.ipv6.dst))
             goto nla_put_failure;
@@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
         if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
                        !geneve->use_udp6_rx_checksums))
             goto nla_put_failure;
-#endif
     }
+#endif
 
     if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
         nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||


@@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
     /* Check if there's an existing gtpX device to configure */
     dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
-    if (dev->netdev_ops == &gtp_netdev_ops)
+    if (dev && dev->netdev_ops == &gtp_netdev_ops)
         gtp = netdev_priv(dev);
 
     put_net(net);


@@ -108,7 +108,7 @@ config MDIO_MOXART
 config MDIO_OCTEON
     tristate "Octeon and some ThunderX SOCs MDIO buses"
     depends on 64BIT
-    depends on HAS_IOMEM
+    depends on HAS_IOMEM && OF_MDIO
     select MDIO_CAVIUM
     help
       This module provides a driver for the Octeon and ThunderX MDIO


@@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
 {
     int err;
 
-    /* The Marvell PHY has an errata which requires
-     * that certain registers get written in order
-     * to restart autonegotiation */
-    err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-    if (err < 0)
-        return err;
-
-    err = phy_write(phydev, 0x1d, 0x1f);
-    if (err < 0)
-        return err;
-
-    err = phy_write(phydev, 0x1e, 0x200c);
-    if (err < 0)
-        return err;
-
-    err = phy_write(phydev, 0x1d, 0x5);
-    if (err < 0)
-        return err;
-
-    err = phy_write(phydev, 0x1e, 0);
-    if (err < 0)
-        return err;
-
-    err = phy_write(phydev, 0x1e, 0x100);
-    if (err < 0)
-        return err;
-
     err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
     if (err < 0)
         return err;
@@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
     return 0;
 }
 
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+    int err;
+
+    /* This Marvell PHY has an errata which requires
+     * that certain registers get written in order
+     * to restart autonegotiation
+     */
+    err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+    if (err < 0)
+        return err;
+
+    err = phy_write(phydev, 0x1d, 0x1f);
+    if (err < 0)
+        return err;
+
+    err = phy_write(phydev, 0x1e, 0x200c);
+    if (err < 0)
+        return err;
+
+    err = phy_write(phydev, 0x1d, 0x5);
+    if (err < 0)
+        return err;
+
+    err = phy_write(phydev, 0x1e, 0);
+    if (err < 0)
+        return err;
+
+    err = phy_write(phydev, 0x1e, 0x100);
+    if (err < 0)
+        return err;
+
+    return marvell_config_aneg(phydev);
+}
+
 static int m88e1111_config_aneg(struct phy_device *phydev)
 {
     int err;
@@ -1892,7 +1900,7 @@ static struct phy_driver marvell_drivers[] = {
         .flags = PHY_HAS_INTERRUPT,
         .probe = marvell_probe,
         .config_init = &marvell_config_init,
-        .config_aneg = &marvell_config_aneg,
+        .config_aneg = &m88e1101_config_aneg,
         .read_status = &genphy_read_status,
         .ack_interrupt = &marvell_ack_interrupt,
         .config_intr = &marvell_config_intr,


@@ -310,13 +310,6 @@ skip:
         return -ENODEV;
     }
 
-    /* Some devices don't initialise properly. In particular
-     * the packet filter is not reset. There are devices that
-     * don't do reset all the way. So the packet filter should
-     * be set to a sane initial value.
-     */
-    usbnet_cdc_update_filter(dev);
-
     return 0;
 
 bad_desc:
@@ -325,6 +318,30 @@ bad_desc:
 }
 EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
 
+/* like usbnet_generic_cdc_bind() but handles filter initialization
+ * correctly
+ */
+int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+    int rv;
+
+    rv = usbnet_generic_cdc_bind(dev, intf);
+    if (rv < 0)
+        goto bail_out;
+
+    /* Some devices don't initialise properly. In particular
+     * the packet filter is not reset. There are devices that
+     * don't do reset all the way. So the packet filter should
+     * be set to a sane initial value.
+     */
+    usbnet_cdc_update_filter(dev);
+
+bail_out:
+    return rv;
+}
+EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind);
+
 void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
     struct cdc_state *info = (void *) &dev->data;
@@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
     BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
                  < sizeof(struct cdc_state)));
 
-    status = usbnet_generic_cdc_bind(dev, intf);
+    status = usbnet_ether_cdc_bind(dev, intf);
     if (status < 0)
         return status;


@@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = {
     .ndo_poll_controller = virtnet_netpoll,
 #endif
     .ndo_xdp            = virtnet_xdp,
+    .ndo_features_check = passthru_features_check,
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)


@@ -272,6 +272,16 @@ struct bpf_prog_aux;
         .off   = OFF,      \
         .imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)     \
+    ((struct bpf_insn) {   \
+        .code  = BPF_JMP | BPF_JA, \
+        .dst_reg = 0,      \
+        .src_reg = 0,      \
+        .off   = OFF,      \
+        .imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC) \
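
To make the new macro concrete, here is a hedged sketch of BPF_JMP_A() in use; the surrounding program is illustrative only (it mimics the test_bpf entries later in this merge) and is not part of the patch:

    /* Illustrative only: BPF_JMP_A(1) emits BPF_JMP | BPF_JA with
     * off = 1, so the interpreter skips the next instruction and
     * this program returns 1 instead of 0.
     */
    struct bpf_insn insns[] = {
        BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0),  /* r0 = 0 */
        BPF_JMP_A(1),                          /* goto pc + 1 */
        BPF_EXIT_INSN(),                       /* skipped: would return 0 */
        BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 1),  /* r0 = 1 */
        BPF_EXIT_INSN(),                       /* returns 1 */
    };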


@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
                                                     netdev_features_t features)
 {
-    if (skb_vlan_tagged_multi(skb))
-        features = netdev_intersect_features(features,
-                                             NETIF_F_SG |
-                                             NETIF_F_HIGHDMA |
-                                             NETIF_F_FRAGLIST |
-                                             NETIF_F_HW_CSUM |
-                                             NETIF_F_HW_VLAN_CTAG_TX |
-                                             NETIF_F_HW_VLAN_STAG_TX);
+    if (skb_vlan_tagged_multi(skb)) {
+        /* In the case of multi-tagged packets, use a direct mask
+         * instead of using netdev_intersect_features(), to make
+         * sure that only devices supporting NETIF_F_HW_CSUM will
+         * have checksum offloading support.
+         */
+        features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+                    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+                    NETIF_F_HW_VLAN_STAG_TX;
+    }
 
     return features;
 }
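
The be2net hunk earlier in this merge shows the intended call pattern; as a sketch (the foo_* names are placeholders, not from the patch), a driver's .ndo_features_check hook would apply the VLAN mask before its own restrictions:

    /* Sketch, assuming a driver-private features_check hook. */
    static netdev_features_t foo_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
    {
        /* Strip offloads that can't be done on multi-tagged (Q-in-Q)
         * frames before any driver-specific tunnel checks.
         */
        features = vlan_features_check(skb, features);

        /* ... driver-specific restrictions would follow here ... */
        return features;
    }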


@@ -787,8 +787,14 @@
 };
 
 enum {
-    CQE_RSS_HTYPE_IP = 0x3 << 6,
-    CQE_RSS_HTYPE_L4 = 0x3 << 2,
+    CQE_RSS_HTYPE_IP = 0x3 << 2,
+    /* cqe->rss_hash_type[3:2] - IP destination selected for hash
+     * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
+     */
+    CQE_RSS_HTYPE_L4 = 0x3 << 6,
+    /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+     * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
+     */
 };
 
 enum {
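
As a hedged sketch of what the corrected layout means for a consumer (the helper name is hypothetical; the mlx5e RX path does the equivalent when it sets skb->hash):

    /* Hypothetical helper: map the CQE hash-type bits to the kernel's
     * packet hash types using the corrected masks above.
     */
    static enum pkt_hash_types cqe_to_pkt_hash_type(u8 rss_hash_type)
    {
        if (rss_hash_type & CQE_RSS_HTYPE_L4)   /* bits [7:6] */
            return PKT_HASH_TYPE_L4;
        if (rss_hash_type & CQE_RSS_HTYPE_IP)   /* bits [3:2] */
            return PKT_HASH_TYPE_L3;
        return PKT_HASH_TYPE_NONE;
    }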


@@ -787,7 +787,12 @@
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
+enum {
+    MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
 struct mlx5_cmd_work_ent {
+    unsigned long state;
     struct mlx5_cmd_msg *in;
     struct mlx5_cmd_msg *out;
     void *uout;
@@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                        int nent, u64 mask, const char *name,


@@ -206,6 +206,7 @@ struct cdc_state {
 };
 
 extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
+extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
 extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
 extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
 extern void usbnet_cdc_status(struct usbnet *, struct urb *);


@@ -107,10 +107,16 @@ struct dst_entry {
     };
 };
 
+struct dst_metrics {
+    u32 metrics[RTAX_MAX];
+    atomic_t refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY   0x1UL
+#define DST_METRICS_REFCOUNTED  0x2UL
 #define DST_METRICS_FLAGS       0x3UL
 #define __DST_METRICS_PTR(Y)    \
     ((u32 *)((Y) & ~DST_METRICS_FLAGS))
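
A hedged sketch of the lifetime rule DST_METRICS_REFCOUNTED encodes (it mirrors the ipv4 route and fib_semantics hunks later in this merge; the function name is illustrative, not from the patch):

    /* Illustrative only: a shared metrics block is freed by its last
     * user; the static dst_default_metrics is never freed.
     */
    static void example_put_metrics(struct dst_metrics *p)
    {
        if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
            kfree(p);
    }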


@@ -114,11 +114,11 @@ struct fib_info {
     __be32 fib_prefsrc;
     u32 fib_tb_id;
     u32 fib_priority;
-    u32 *fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+    struct dst_metrics *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
     int fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
     int fib_weight;


@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <net/act_api.h>
+#include <linux/tc_act/tc_csum.h>
 
 struct tcf_csum {
     struct tc_action common;
@@ -11,4 +12,18 @@ struct tcf_csum {
 };
 #define to_tcf_csum(a) ((struct tcf_csum *)a)
 
+static inline bool is_tcf_csum(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+    if (a->ops && a->ops->type == TCA_ACT_CSUM)
+        return true;
+#endif
+    return false;
+}
+
+static inline u32 tcf_csum_update_flags(const struct tc_action *a)
+{
+    return to_tcf_csum(a)->update_flags;
+}
+
 #endif /* __NET_TC_CSUM_H */


@@ -979,10 +979,6 @@ struct xfrm_dst {
     struct flow_cache_object flo;
     struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
     int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
-    struct flowi *origin;
-    struct xfrm_selector *partner;
-#endif
     u32 xfrm_genid;
     u32 policy_genid;
     u32 route_mtu_cached;
@@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
     dst_release(xdst->route);
     if (likely(xdst->u.dst.xfrm))
         xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
-    kfree(xdst->origin);
-    xdst->origin = NULL;
-    kfree(xdst->partner);
-    xdst->partner = NULL;
-#endif
 }
 #endif


@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
     array->map.key_size = attr->key_size;
     array->map.value_size = attr->value_size;
     array->map.max_entries = attr->max_entries;
+    array->map.map_flags = attr->map_flags;
     array->elem_size = elem_size;
 
     if (!percpu)


@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
     trie->map.key_size = attr->key_size;
     trie->map.value_size = attr->value_size;
     trie->map.max_entries = attr->max_entries;
+    trie->map.map_flags = attr->map_flags;
     trie->data_size = attr->key_size -
                       offsetof(struct bpf_lpm_trie_key, data);
     trie->max_prefixlen = trie->data_size * 8;


@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
     smap->map.key_size = attr->key_size;
     smap->map.value_size = value_size;
     smap->map.max_entries = attr->max_entries;
+    smap->map.map_flags = attr->map_flags;
     smap->n_buckets = n_buckets;
     smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;


@@ -463,19 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
     BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
+static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
+{
+    BUG_ON(regno >= MAX_BPF_REG);
+    memset(&regs[regno], 0, sizeof(regs[regno]));
+    regs[regno].type = NOT_INIT;
+    regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+    regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+}
+
 static void init_reg_state(struct bpf_reg_state *regs)
 {
     int i;
 
-    for (i = 0; i < MAX_BPF_REG; i++) {
-        regs[i].type = NOT_INIT;
-        regs[i].imm = 0;
-        regs[i].min_value = BPF_REGISTER_MIN_RANGE;
-        regs[i].max_value = BPF_REGISTER_MAX_RANGE;
-        regs[i].min_align = 0;
-        regs[i].aux_off = 0;
-        regs[i].aux_off_align = 0;
-    }
+    for (i = 0; i < MAX_BPF_REG; i++)
+        mark_reg_not_init(regs, i);
 
     /* frame pointer */
     regs[BPF_REG_FP].type = FRAME_PTR;
@@ -843,9 +846,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 {
     bool strict = env->strict_alignment;
 
-    if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-        strict = true;
-
     switch (reg->type) {
     case PTR_TO_PACKET:
         return check_pkt_ptr_alignment(reg, off, size, strict);
@@ -1349,7 +1349,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
     struct bpf_verifier_state *state = &env->cur_state;
     const struct bpf_func_proto *fn = NULL;
     struct bpf_reg_state *regs = state->regs;
-    struct bpf_reg_state *reg;
     struct bpf_call_arg_meta meta;
     bool changes_data;
     int i, err;
@@ -1416,11 +1415,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
     }
 
     /* reset caller saved regs */
-    for (i = 0; i < CALLER_SAVED_REGS; i++) {
-        reg = regs + caller_saved[i];
-        reg->type = NOT_INIT;
-        reg->imm = 0;
-    }
+    for (i = 0; i < CALLER_SAVED_REGS; i++)
+        mark_reg_not_init(regs, caller_saved[i]);
 
     /* update return register */
     if (fn->ret_type == RET_INTEGER) {
@@ -2448,7 +2444,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
     struct bpf_reg_state *regs = env->cur_state.regs;
     u8 mode = BPF_MODE(insn->code);
-    struct bpf_reg_state *reg;
     int i, err;
 
     if (!may_access_skb(env->prog->type)) {
@@ -2481,11 +2476,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
     }
 
     /* reset caller saved regs to unreadable */
-    for (i = 0; i < CALLER_SAVED_REGS; i++) {
-        reg = regs + caller_saved[i];
-        reg->type = NOT_INIT;
-        reg->imm = 0;
-    }
+    for (i = 0; i < CALLER_SAVED_REGS; i++)
+        mark_reg_not_init(regs, caller_saved[i]);
 
     /* mark destination R0 register as readable, since it contains
      * the value fetched from the packet
@@ -2696,7 +2688,8 @@ err_free:
 /* the following conditions reduce the number of explored insns
  * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
  */
-static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
+                                   struct bpf_reg_state *old,
                                    struct bpf_reg_state *cur)
 {
     if (old->id != cur->id)
@@ -2739,7 +2732,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
      * 'if (R4 > data_end)' and all further insn were already good with r=20,
      * so they will be good with r=30 and we can prune the search.
      */
-    if (old->off <= cur->off &&
+    if (!env->strict_alignment && old->off <= cur->off &&
         old->off >= old->range && cur->off >= cur->range)
         return true;
 
@@ -2810,7 +2803,7 @@ static bool states_equal(struct bpf_verifier_env *env,
             continue;
         if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
-            compare_ptrs_to_packet(rold, rcur))
+            compare_ptrs_to_packet(env, rold, rcur))
             continue;
 
         return false;
@@ -3588,10 +3581,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
     } else {
         log_level = 0;
     }
-    if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT)
+
+    env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
+    if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
         env->strict_alignment = true;
-    else
-        env->strict_alignment = false;
 
     ret = replace_map_fd_with_map_ptr(env);
     if (ret < 0)
@@ -3697,7 +3690,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
     mutex_lock(&bpf_verifier_lock);
     log_level = 0;
+
     env->strict_alignment = false;
+    if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+        env->strict_alignment = true;
 
     env->explored_states = kcalloc(env->prog->len,
                                    sizeof(struct bpf_verifier_state_list *),


@@ -4504,6 +4504,44 @@ static struct bpf_test tests[] = {
         { },
         { { 0, 1 } },
     },
+    {
+        "JMP_JSGE_K: Signed jump: value walk 1",
+        .u.insns_int = {
+            BPF_ALU32_IMM(BPF_MOV, R0, 0),
+            BPF_LD_IMM64(R1, -3),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+            BPF_ALU64_IMM(BPF_ADD, R1, 1),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+            BPF_ALU64_IMM(BPF_ADD, R1, 1),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+            BPF_ALU64_IMM(BPF_ADD, R1, 1),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+            BPF_EXIT_INSN(),                /* bad exit */
+            BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+            BPF_EXIT_INSN(),
+        },
+        INTERNAL,
+        { },
+        { { 0, 1 } },
+    },
+    {
+        "JMP_JSGE_K: Signed jump: value walk 2",
+        .u.insns_int = {
+            BPF_ALU32_IMM(BPF_MOV, R0, 0),
+            BPF_LD_IMM64(R1, -3),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+            BPF_ALU64_IMM(BPF_ADD, R1, 2),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+            BPF_ALU64_IMM(BPF_ADD, R1, 2),
+            BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+            BPF_EXIT_INSN(),                /* bad exit */
+            BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+            BPF_EXIT_INSN(),
+        },
+        INTERNAL,
+        { },
+        { { 0, 1 } },
+    },
     /* BPF_JMP | BPF_JGT | BPF_K */
     {
         "JMP_JGT_K: if (3 > 2) return 1",


@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
     /* This initializer is needed to force linker to place this variable
      * into const section. Otherwise it might end into bss section.
      * We really want to avoid false sharing on this variable, and catch
     * any writes on it.
      */
-    [RTAX_MAX] = 0xdeadbeef,
+    .refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
     if (dev)
         dev_hold(dev);
     dst->ops = ops;
-    dst_init_metrics(dst, dst_default_metrics, true);
+    dst_init_metrics(dst, dst_default_metrics.metrics, true);
     dst->expires = 0UL;
     dst->path = dst;
     dst->from = NULL;
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release);
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-    u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+    struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
     if (p) {
-        u32 *old_p = __DST_METRICS_PTR(old);
+        struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
         unsigned long prev, new;
 
-        memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+        atomic_set(&p->refcnt, 1);
+        memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
         new = (unsigned long) p;
         prev = cmpxchg(&dst->_metrics, old, new);
 
         if (prev != old) {
             kfree(p);
-            p = __DST_METRICS_PTR(prev);
+            p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
             if (prev & DST_METRICS_READ_ONLY)
                 p = NULL;
+        } else if (prev & DST_METRICS_REFCOUNTED) {
+            if (atomic_dec_and_test(&old_p->refcnt))
+                kfree(old_p);
         }
     }
-    return p;
+
+    BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+    return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
     unsigned long prev, new;
 
-    new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+    new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
     prev = cmpxchg(&dst->_metrics, old, new);
     if (prev == old)
         kfree(__DST_METRICS_PTR(old));


@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
         func == bpf_skb_change_head ||
         func == bpf_skb_change_tail ||
         func == bpf_skb_pull_data ||
+        func == bpf_clone_redirect ||
         func == bpf_l3_csum_replace ||
         func == bpf_l4_csum_replace ||
         func == bpf_xdp_adjust_head)


@@ -315,6 +315,25 @@ out_undo:
     goto out;
 }
 
+static int __net_init net_defaults_init_net(struct net *net)
+{
+    net->core.sysctl_somaxconn = SOMAXCONN;
+    return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+    .init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+    if (register_pernet_subsys(&net_defaults_ops))
+        panic("Cannot initialize net default settings");
+
+    return 0;
+}
+
+core_initcall(net_defaults_init);
+
 #ifdef CONFIG_NET_NS
 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)


@@ -3231,8 +3231,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
     int err = 0;
     int fidx = 0;
 
-    if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-                    IFLA_MAX, ifla_policy, NULL) == 0) {
+    err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+                      IFLA_MAX, ifla_policy, NULL);
+    if (err < 0) {
+        return -EINVAL;
+    } else if (err == 0) {
         if (tb[IFLA_MASTER])
             br_idx = nla_get_u32(tb[IFLA_MASTER]);
     }


@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
 {
     struct ctl_table *tbl;
 
-    net->core.sysctl_somaxconn = SOMAXCONN;
-
     tbl = netns_core_table;
     if (!net_eq(net, &init_net)) {
         tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);


@@ -863,8 +863,8 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 
     n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
-    if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
-        addr_type = -1;
+    addr_type = -1;
+    if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
         is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
                               sip, tip, sha, tha);
     }


@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
     u8 *tail;
     u8 *vaddr;
     int nfrags;
+    int esph_offset;
     struct page *page;
     struct sk_buff *trailer;
     int tailen = esp->tailen;
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
     }
 
 cow:
+    esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
+
     nfrags = skb_cow_data(skb, tailen, &trailer);
     if (nfrags < 0)
         goto out;
     tail = skb_tail_pointer(trailer);
-    esp->esph = ip_esp_hdr(skb);
+    esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
 
 skip_cow:
     esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);


@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
 static void free_fib_info_rcu(struct rcu_head *head)
 {
     struct fib_info *fi = container_of(head, struct fib_info, rcu);
+    struct dst_metrics *m;
 
     change_nexthops(fi) {
         if (nexthop_nh->nh_dev)
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
         rt_fibinfo_free(&nexthop_nh->nh_rth_input);
     } endfor_nexthops(fi);
 
-    if (fi->fib_metrics != (u32 *) dst_default_metrics)
-        kfree(fi->fib_metrics);
+    m = fi->fib_metrics;
+    if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+        kfree(m);
     kfree(fi);
 }
 
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
             val = 255;
         if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
             return -EINVAL;
-        fi->fib_metrics[type - 1] = val;
+        fi->fib_metrics->metrics[type - 1] = val;
     }
 
     if (ecn_ca)
-        fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+        fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
     return 0;
 }
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
         goto failure;
     fib_info_cnt++;
     if (cfg->fc_mx) {
-        fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+        fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
         if (!fi->fib_metrics)
             goto failure;
+        atomic_set(&fi->fib_metrics->refcnt, 1);
     } else
-        fi->fib_metrics = (u32 *) dst_default_metrics;
+        fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
     fi->fib_net = net;
 
     fi->fib_protocol = cfg->fc_protocol;
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
     if (fi->fib_priority &&
         nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
         goto nla_put_failure;
-    if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+    if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
         goto nla_put_failure;
 
     if (fi->fib_prefsrc &&


@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt)
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
+    struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
     struct rtable *rt = (struct rtable *) dst;
 
+    if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+        kfree(p);
+
     if (!list_empty(&rt->rt_uncached)) {
         struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
             rt->rt_gateway = nh->nh_gw;
             rt->rt_uses_gateway = 1;
         }
-        dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+        dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+        if (fi->fib_metrics != &dst_default_metrics) {
+            rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+            atomic_inc(&fi->fib_metrics->refcnt);
+        }
 #ifdef CONFIG_IP_ROUTE_CLASSID
         rt->dst.tclassid = nh->nh_tclassid;
 #endif


@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct inet_sock *inet = inet_sk(sk);
+    struct sockaddr *uaddr = msg->msg_name;
     int err, flags;
 
-    if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+    if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+        (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+         uaddr->sa_family == AF_UNSPEC))
         return -EOPNOTSUPP;
     if (tp->fastopen_req)
         return -EALREADY; /* Another Fast Open is in progress */
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
         }
     }
     flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-    err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+    err = __inet_stream_connect(sk->sk_socket, uaddr,
                                 msg->msg_namelen, flags, 1);
     /* fastopen_req could already be freed in __inet_stream_connect
      * if the connection times out or gets rst
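
For context, TCP Fast Open is driven from userspace through sendto()/sendmsg() with MSG_FASTOPEN; after this fix, a destination whose sa_family is AF_UNSPEC is rejected with EOPNOTSUPP before the fastopen path runs. A minimal userspace sketch (error handling elided; MSG_FASTOPEN may require _GNU_SOURCE with glibc):

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Sketch: send SYN plus data on an unconnected TCP socket via TFO. */
    static ssize_t tfo_send(int fd, const struct sockaddr_in *dst,
                            const void *buf, size_t len)
    {
        return sendto(fd, buf, len, MSG_FASTOPEN,
                      (const struct sockaddr *)dst, sizeof(*dst));
    }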


@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
     memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-    dsfield = ipv4_get_dsfield(iph);
-
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-        fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                         & IPV6_TCLASS_MASK;
+        dsfield = ipv4_get_dsfield(iph);
+    else
+        dsfield = ip6_tclass(t->parms.flowinfo);
+
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
         fl6.flowi6_mark = skb->mark;
     else
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
     memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-    dsfield = ipv6_get_dsfield(ipv6h);
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-        fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+        dsfield = ipv6_get_dsfield(ipv6h);
+    else
+        dsfield = ip6_tclass(t->parms.flowinfo);
+
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
         fl6.flowlabel |= ip6_flowlabel(ipv6h);
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)


@@ -1196,7 +1196,7 @@ route_lookup:
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
-	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+	ip6_flow_hdr(ipv6h, dsfield,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
 	ipv6h->hop_limit = hop_limit;
 	ipv6h->nexthdr = proto;
@@ -1231,8 +1231,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (tproto != IPPROTO_IPIP && tproto != 0)
 		return -1;
 
-	dsfield = ipv4_get_dsfield(iph);
-
 	if (t->parms.collect_md) {
 		struct ip_tunnel_info *tun_info;
 		const struct ip_tunnel_key *key;
@@ -1246,6 +1244,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPIP;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
+		dsfield = ip6_tclass(key->label);
 	} else {
 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 			encap_limit = t->parms.encap_limit;
@@ -1254,8 +1253,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		fl6.flowi6_proto = IPPROTO_IPIP;
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-						& IPV6_TCLASS_MASK;
+			dsfield = ipv4_get_dsfield(iph);
+		else
+			dsfield = ip6_tclass(t->parms.flowinfo);
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 			fl6.flowi6_mark = skb->mark;
 		else
@@ -1267,6 +1267,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
+	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1300,8 +1302,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	    ip6_tnl_addr_conflict(t, ipv6h))
 		return -1;
 
-	dsfield = ipv6_get_dsfield(ipv6h);
-
 	if (t->parms.collect_md) {
 		struct ip_tunnel_info *tun_info;
 		const struct ip_tunnel_key *key;
@@ -1315,6 +1315,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPV6;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
+		dsfield = ip6_tclass(key->label);
 	} else {
 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1337,7 +1338,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		fl6.flowi6_proto = IPPROTO_IPV6;
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-			fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+			dsfield = ipv6_get_dsfield(ipv6h);
+		else
+			dsfield = ip6_tclass(t->parms.flowinfo);
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
 			fl6.flowlabel |= ip6_flowlabel(ipv6h);
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1351,6 +1354,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
+	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
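With the DSCP decision moved up, ECN is now folded in exactly once, just before ip6_tnl_xmit(). A minimal restatement of the INET_ECN_encapsulate() rule used here (codepoints per RFC 3168, semantics assumed to match include/net/inet_ecn.h):

#include <stdint.h>

#define INET_ECN_ECT_0	2
#define INET_ECN_CE	3
#define INET_ECN_MASK	3

/* Keep the DSCP already chosen for the outer header; inherit the inner
 * ECN codepoint, but never seed a fresh outer header with CE - it
 * degrades to ECT(0) so congestion marking stays meaningful. */
static inline uint8_t ecn_encapsulate(uint8_t outer, uint8_t inner)
{
	uint8_t ecn = inner & INET_ECN_MASK;

	outer &= ~INET_ECN_MASK;
	outer |= (ecn != INET_ECN_CE) ? ecn : INET_ECN_ECT_0;
	return outer;
}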

View File

@@ -3285,7 +3285,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
 		p += pol->sadb_x_policy_len*8;
 		sec_ctx = (struct sadb_x_sec_ctx *)p;
 		if (len < pol->sadb_x_policy_len*8 +
-			  sec_ctx->sadb_x_sec_len) {
+			  sec_ctx->sadb_x_sec_len*8) {
 			*dir = -EINVAL;
 			goto out;
 		}
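The missing *8 matters because PF_KEY extension lengths, sadb_x_sec_len included, are counted in 64-bit words on the wire (RFC 2367), while len here is in bytes. A self-contained sketch of the corrected bounds check:

#include <stdbool.h>
#include <stddef.h>

/* Both word counts are in 8-byte units; 'len' is bytes. Comparing a raw
 * word count against bytes under-checked the buffer and allowed a
 * heap over-read while parsing the security context. */
static bool pfkey_sec_ctx_fits(size_t len, size_t policy_words,
			       size_t sec_words)
{
	return len >= policy_words * 8 + sec_words * 8;
}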

View File

@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	int rc = -EINVAL;
 
 	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+	lock_sock(sk);
 	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
 		goto out;
 	rc = -EAFNOSUPPORT;
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
 	llc_sap_put(sap);
 out:
+	release_sock(sk);
 	return rc;
 }
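Reduced to its locking pattern, the fix brackets the SOCK_ZAPPED test and the bind state change in a single lock_sock()/release_sock() pair, so two threads racing into bind() can no longer both observe the socket as unbound. A sketch under that assumption (kernel context; not the full llc_ui_bind() body):

static int proto_bind_locked(struct socket *sock, struct sockaddr *uaddr,
			     int addrlen)
{
	struct sock *sk = sock->sk;
	int rc = -EINVAL;

	lock_sock(sk);
	if (unlikely(!sock_flag(sk, SOCK_ZAPPED)))	/* already bound? */
		goto out;
	/* ... validate the address and bind; clearing SOCK_ZAPPED here is
	 * now atomic with respect to the check above ... */
	rc = 0;
out:
	release_sock(sk);
	return rc;
}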

View File

@@ -2492,7 +2492,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 		if (is_multicast_ether_addr(hdr->addr1)) {
 			mpp_addr = hdr->addr3;
 			proxied_addr = mesh_hdr->eaddr1;
-		} else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+		} else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+			   MESH_FLAGS_AE_A5_A6) {
 			/* has_a4 already checked in ieee80211_rx_mesh_check */
 			mpp_addr = hdr->addr4;
 			proxied_addr = mesh_hdr->eaddr2;
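The switch from & to a masked comparison matters because the address-extension bits form a two-bit enumerated field, not independent flags. Using the values as defined in include/linux/ieee80211.h:

#define MESH_FLAGS_AE_A4	0x1
#define MESH_FLAGS_AE_A5_A6	0x2
#define MESH_FLAGS_AE		0x3	/* mask of the whole field */

/* A frame carrying the reserved value 0x3 satisfies both
 * (flags & MESH_FLAGS_AE_A4) and (flags & MESH_FLAGS_AE_A5_A6);
 * only ((flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) tests the
 * field as the enumeration it is. */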

View File

@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
-	if (!asoc->stream) {
+
+	if (sctp_state(asoc, COOKIE_WAIT)) {
+		sctp_stream_free(asoc->stream);
 		asoc->stream = new->stream;
 		new->stream = NULL;
 	}

View File

@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 			     struct sctp_association **app,
 			     struct sctp_transport **tpp)
 {
+	struct sctp_init_chunk *chunkhdr, _chunkhdr;
 	union sctp_addr saddr;
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport = NULL;
-	struct sctp_init_chunk *chunkhdr;
 	__u32 vtag = ntohl(sctphdr->vtag);
-	int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 
 	*app = NULL; *tpp = NULL;
 
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 	 * discard the packet.
 	 */
 	if (vtag == 0) {
-		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
-		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
-			  + sizeof(__be32) ||
+		/* chunk header + first 4 octects of init header */
+		chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+					      sizeof(struct sctphdr),
+					      sizeof(struct sctp_chunkhdr) +
+					      sizeof(__be32), &_chunkhdr);
+		if (!chunkhdr ||
 		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
-		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
 			goto out;
-		}
+
 	} else if (vtag != asoc->c.peer_vtag) {
 		goto out;
 	}
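skb_header_pointer() is what makes this parse safe on non-linear skbs: it returns a pointer into the linear head when the requested bytes are there, and otherwise copies them into the caller's stack buffer (_chunkhdr above). A userspace restatement of that contract, with a flat buffer standing in for the skb (the real helper also walks paged fragments):

#include <stddef.h>
#include <string.h>

static void *header_pointer(const void *pkt, size_t pkt_len,
			    size_t offset, size_t len, void *buffer)
{
	if (offset + len > pkt_len)
		return NULL;	/* truncated ICMP payload: caller bails out */
	/* The kernel returns pkt + offset directly when the range is
	 * linear; copying unconditionally keeps this sketch simple. */
	memcpy(buffer, (const char *)pkt + offset, len);
	return buffer;
}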

View File

@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 	 * stream sequence number shall be set to 0.
 	 */
 
-	/* Allocate storage for the negotiated streams if it is not a temporary
-	 * association.
-	 */
-	if (!asoc->temp) {
-		if (sctp_stream_init(asoc, gfp))
-			goto clean_up;
+	if (sctp_stream_init(asoc, gfp))
+		goto clean_up;
 
-		if (sctp_assoc_set_id(asoc, gfp))
-			goto clean_up;
-	}
+	if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
+		goto clean_up;
 
 	/* ADDIP Section 4.1 ASCONF Chunk Procedures
 	 *

View File

@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
 		}
 	}
 
+	/* Set temp so that it won't be added into hashtable */
+	new_asoc->temp = 1;
+
 	/* Compare the tie_tag in cookie with the verification tag of
 	 * current association.
 	 */

View File

@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
 {
 	struct cfg80211_sched_scan_request *pos;
 
-	ASSERT_RTNL();
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
-	list_for_each_entry(pos, &rdev->sched_scan_req_list, list) {
+	list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
 		if (pos->reqid == reqid)
 			return pos;
 	}
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
 
 	trace_cfg80211_sched_scan_results(wiphy, reqid);
 	/* ignore if we're not scanning */
 
-	rtnl_lock();
+	rcu_read_lock();
 	request = cfg80211_find_sched_scan_req(rdev, reqid);
 	if (request) {
 		request->report_results = true;
 		queue_work(cfg80211_wq, &rdev->sched_scan_res_wk);
 	}
-	rtnl_unlock();
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
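The resulting discipline: lookups may now run under either RCU or the RTNL, while list updates stay RTNL-serialized and must use the _rcu list primitives. A fragment-level sketch of both sides (kernel context assumed; new_req is a hypothetical name):

/* Reader: no RTNL needed any more. */
rcu_read_lock();
request = cfg80211_find_sched_scan_req(rdev, reqid);
if (request)
	request->report_results = true;
rcu_read_unlock();

/* Writer: still serialized by the RTNL. */
ASSERT_RTNL();
list_add_rcu(&new_req->list, &rdev->sched_scan_req_list);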

View File

@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 	if (iftype == NL80211_IFTYPE_MESH_POINT)
 		skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
+	mesh_flags &= MESH_FLAGS_AE;
+
 	switch (hdr->frame_control &
 		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
 	case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 			     iftype != NL80211_IFTYPE_STATION))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_dest, 2 * ETH_ALEN);
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 			      ether_addr_equal(tmp.h_source, addr)))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_source, ETH_ALEN);

View File

@@ -170,7 +170,7 @@ static int xfrm_dev_feat_change(struct net_device *dev)
 
 static int xfrm_dev_down(struct net_device *dev)
 {
-	if (dev->hw_features & NETIF_F_HW_ESP)
+	if (dev->features & NETIF_F_HW_ESP)
 		xfrm_dev_state_flush(dev_net(dev), dev, true);
 
 	xfrm_garbage_collect(dev_net(dev));

View File

@@ -1797,43 +1797,6 @@ free_dst:
 	goto out;
 }
 
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
-	if (!*target) {
-		*target = kmalloc(size, GFP_ATOMIC);
-		if (!*target)
-			return -ENOMEM;
-	}
-
-	memcpy(*target, src, size);
-	return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
-				  const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-
-	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
-				   sel, sizeof(*sel));
-#else
-	return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
-				  const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-
-	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
-	return 0;
-#endif
-}
-
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
 				struct xfrm_policy **pols,
 				int *num_pols, int *num_xfrms)
@@ -1905,16 +1868,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	xdst = (struct xfrm_dst *)dst;
 	xdst->num_xfrms = err;
-	if (num_pols > 1)
-		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
-	else
-		err = xfrm_dst_update_origin(dst, fl);
-	if (unlikely(err)) {
-		dst_free(dst);
-		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
-		return ERR_PTR(err);
-	}
-
 	xdst->num_pols = num_pols;
 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
 	xdst->policy_genid = atomic_read(&pols[0]->genid);

View File

@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
 	x->curlft.add_time = orig->curlft.add_time;
 	x->km.state = orig->km.state;
 	x->km.seq = orig->km.seq;
+	x->replay = orig->replay;
+	x->preplay = orig->preplay;
 
 	return x;

View File

@@ -208,6 +208,16 @@
 		.off   = OFF,					\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_JA,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\
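BPF_JMP_A() emits a single unconditional jump, pc += off16. Spelled out, BPF_JMP_A(-6), as used by the verifier test below to loop back six instructions, expands to (a sketch of the expansion, not new kernel code):

struct bpf_insn insn = {
	.code    = BPF_JMP | BPF_JA,	/* jump class, "always" opcode */
	.dst_reg = 0,			/* unused */
	.src_reg = 0,			/* unused */
	.off     = -6,			/* signed 16-bit pc-relative offset */
	.imm     = 0,			/* unused */
};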

View File

@@ -49,6 +49,7 @@
 #define MAX_NR_MAPS	4
 
 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
+#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
 
 struct bpf_test {
 	const char *descr;
@@ -2614,6 +2615,30 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"direct packet access: test17 (pruning, alignment)",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_A(-6),
+		},
+		.errstr = "misaligned packet access off 2+15+-4 size 4",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+	},
 	{
 		"helper access to packet: test1, valid packet_ptr range",
 		.insns = {
@@ -3340,6 +3365,70 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
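Written as restricted C, the pattern these three tests pin down looks like the hypothetical fragment below (my_map and key are stand-ins): any ALU op on the result of bpf_map_lookup_elem() before the NULL check invalidates the verifier's map_value_or_null tracking, so the guarded store is rejected even when the offsets cancel out.

long *v = bpf_map_lookup_elem(&my_map, &key);

v = (long *)((char *)v - 2);	/* ALU on a maybe-NULL map value... */
v = (long *)((char *)v + 2);	/* ...even if it nets out to zero */
if (v)
	*v = 0;			/* rejected: "R4 invalid mem access" */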
 	{
 		"invalid memory access with multiple map_lookup_elem calls",
 		.insns = {
@@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = {
 		.fixup_map_in_map = { 3 },
 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
 		.result = REJECT,
-	}
+	},
+	{
+		"ld_abs: check calling conv, r1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R3 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R4 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R5 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_7, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"ld_ind: check calling conv, r1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_3, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R3 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R4 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R5 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_7, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	do_test_fixup(test, prog, map_fds);
 
-	fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
-				   prog, prog_len, "GPL", 0, bpf_vlog,
-				   sizeof(bpf_vlog));
+	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
 
 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
 		       test->result_unpriv : test->result;
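bpf_verify_program() differs from bpf_load_program() by one extra argument, which selects BPF_F_STRICT_ALIGNMENT at load time; that is how F_LOAD_WITH_STRICT_ALIGNMENT tests force alignment tracking even on architectures with efficient unaligned access. A sketch of a call with the signature used in this series:

char log[65536];

int fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER,
			    insns, insn_cnt,
			    1,		/* strict_alignment flag */
			    "GPL", 0,	/* license, kern_version */
			    log, sizeof(log));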