net/mlx5e: Use flow keys dissector to parse packets for ARFS
The current ARFS code relies on certain fields to be set in the SKB
(e.g. transport_header) and extracts IP addresses and ports by custom
code that parses the packet. The necessary SKB fields, however, are not
always set at that point, which leads to an out-of-bounds access. Use
skb_flow_dissect_flow_keys() to get the necessary information reliably,
fix the out-of-bounds access and reuse the code.
Fixes: 18c908e477 ("net/mlx5e: Add accelerated RFS support")
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit 405b93eb76
parent f6649feb26
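For reference, the dissector-based parsing that this patch switches to can be sketched roughly as below. This is a standalone illustration, not part of the patch: the function name example_arfs_parse is made up, while the skb_flow_dissect_flow_keys() call and the struct flow_keys field accesses mirror the ones used in the diff that follows.

/* Illustrative sketch only (not part of the patch): shows how
 * skb_flow_dissect_flow_keys() yields the L3/L4 fields that the old
 * code read through skb_transport_header(), which may not be set yet.
 */
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static int example_arfs_parse(const struct sk_buff *skb)
{
	struct flow_keys fk;

	/* The dissector walks the packet headers itself, so it does not
	 * depend on skb->transport_header having been set earlier.
	 */
	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	/* Fields now available without touching raw headers:
	 *   fk.basic.ip_proto           - e.g. IPPROTO_TCP / IPPROTO_UDP
	 *   fk.addrs.v4addrs.src/.dst   - IPv4 addresses (__be32)
	 *   fk.addrs.v6addrs.src/.dst   - IPv6 addresses (struct in6_addr)
	 *   fk.ports.src / fk.ports.dst - L4 ports (__be16)
	 */
	return 0;
}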
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
 	return &arfs_t->rules_hash[bucket_idx];
 }
 
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) ?
-		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 					 u8 ip_proto, __be16 etype)
 {
@@ -602,31 +596,9 @@ out:
 	arfs_may_expire_flow(priv);
 }
 
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->dest;
-	return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->source;
-	return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 					 struct arfs_table *arfs_t,
-					 const struct sk_buff *skb,
+					 const struct flow_keys *fk,
 					 u16 rxq, u32 flow_id)
 {
 	struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	INIT_WORK(&rule->arfs_work, arfs_handle_work);
 
 	tuple = &rule->tuple;
-	tuple->etype = skb->protocol;
+	tuple->etype = fk->basic.n_proto;
+	tuple->ip_proto = fk->basic.ip_proto;
 	if (tuple->etype == htons(ETH_P_IP)) {
-		tuple->src_ipv4 = ip_hdr(skb)->saddr;
-		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+		tuple->src_ipv4 = fk->addrs.v4addrs.src;
+		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
 	} else {
-		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
 		       sizeof(struct in6_addr));
-		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
 		       sizeof(struct in6_addr));
 	}
-	tuple->ip_proto = arfs_get_ip_proto(skb);
-	tuple->src_port = arfs_get_src_port(skb);
-	tuple->dst_port = arfs_get_dst_port(skb);
+	tuple->src_port = fk->ports.src;
+	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
 	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	return rule;
 }
 
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-			 const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-	if (tuple->etype == htons(ETH_P_IP) &&
-	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-		return true;
-	if (tuple->etype == htons(ETH_P_IPV6) &&
-	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		     sizeof(struct in6_addr))) &&
-	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		     sizeof(struct in6_addr))))
-		return true;
+	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+		return false;
+	if (tuple->etype != fk->basic.n_proto)
+		return false;
+	if (tuple->etype == htons(ETH_P_IP))
+		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+	if (tuple->etype == htons(ETH_P_IPV6))
+		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+			       sizeof(struct in6_addr)) &&
+		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+			       sizeof(struct in6_addr));
 	return false;
 }
 
 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-					const struct sk_buff *skb)
+					const struct flow_keys *fk)
 {
 	struct arfs_rule *arfs_rule;
 	struct hlist_head *head;
-	__be16 src_port = arfs_get_src_port(skb);
-	__be16 dst_port = arfs_get_dst_port(skb);
 
-	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
 	hlist_for_each_entry(arfs_rule, head, hlist) {
-		if (arfs_rule->tuple.src_port == src_port &&
-		    arfs_rule->tuple.dst_port == dst_port &&
-		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+		if (arfs_cmp(&arfs_rule->tuple, fk))
 			return arfs_rule;
-		}
 	}
 
 	return NULL;
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
+	struct flow_keys fk;
+
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
 
-	if (skb->protocol != htons(ETH_P_IP) &&
-	    skb->protocol != htons(ETH_P_IPV6))
+	if (fk.basic.n_proto != htons(ETH_P_IP) &&
+	    fk.basic.n_proto != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
 	if (skb->encapsulation)
 		return -EPROTONOSUPPORT;
 
-	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
 
 	spin_lock_bh(&arfs->arfs_lock);
-	arfs_rule = arfs_find_rule(arfs_t, skb);
+	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
 		if (arfs_rule->rxq == rxq_index) {
 			spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		}
 		arfs_rule->rxq = rxq_index;
 	} else {
-		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-					    rxq_index, flow_id);
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
 		if (!arfs_rule) {
 			spin_unlock_bh(&arfs->arfs_lock);
 			return -ENOMEM;