Merge branch 'fix-use-after-free-bugs-in-skb-list-processing'
Edward Cree says:

====================
fix use-after-free bugs in skb list processing

A couple of bugs in skb list handling were spotted by Dan Carpenter, with
the help of Smatch; following up on them I found a couple more similar
cases.  This series fixes them by changing the relevant loops to use the
dequeue-enqueue model (rather than in-place list modification).

v3: fixed another similar bug in __netif_receive_skb_list_core().
v2: dropped patch #3 (new list.h helper), per DaveM's request.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 863f4fdb71
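The series replaces in-place list_del() calls inside list_for_each_entry_safe() loops with a dequeue-enqueue scheme: each skb is unlinked from the list before it is handed to code that may free it, and only the skbs the caller still owns are re-linked onto a private sublist and spliced back afterwards. The userspace sketch below illustrates that pattern only; the simplified list helpers, struct pkt, and handle_pkt() are hypothetical stand-ins for <linux/list.h>, sk_buff, and verdict functions such as nf_hook(), and are not part of this commit.

/* Minimal userspace sketch of the dequeue-enqueue pattern (not kernel code).
 * The list helpers are simplified stand-ins for <linux/list.h>; struct pkt and
 * handle_pkt() are hypothetical stand-ins for sk_buff and a verdict function
 * such as nf_hook() that may free the packet handed to it. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

/* Move every entry of @list to the front of @head (like the kernel helper). */
static void list_splice(struct list_head *list, struct list_head *head)
{
	if (list->next == list)
		return;
	list->next->prev = head;
	list->prev->next = head->next;
	head->next->prev = list->prev;
	head->next = list->next;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

struct pkt {				/* hypothetical stand-in for sk_buff */
	int id;
	struct list_head list;
};

/* Hypothetical verdict function: frees the packet unless it returns 1. */
static int handle_pkt(struct pkt *p)
{
	if (p->id % 2) {
		free(p);	/* "dropped": caller must not touch p again */
		return 0;
	}
	return 1;		/* "accepted": caller keeps ownership */
}

/* Dequeue-enqueue model: unlink each entry *before* handing it off, and only
 * re-link the entries we still own afterwards.  The buggy in-place form calls
 * list_del(&p->list) after handle_pkt(), i.e. on possibly-freed memory. */
static void process_list(struct list_head *head)
{
	struct pkt *p, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(p, next, head, list) {
		list_del(&p->list);
		if (handle_pkt(p) == 1)
			list_add_tail(&p->list, &sublist);
	}
	/* Put surviving packets back on the main list */
	list_splice(&sublist, head);
}

int main(void)
{
	struct list_head head;
	struct pkt *p, *next;
	int i;

	INIT_LIST_HEAD(&head);
	for (i = 0; i < 6; i++) {
		p = malloc(sizeof(*p));
		p->id = i;
		list_add_tail(&p->list, &head);
	}
	process_list(&head);
	list_for_each_entry_safe(p, next, &head, list) {
		printf("kept pkt %d\n", p->id);	/* expect ids 0, 2, 4 */
		free(p);
	}
	return 0;
}

The same shape appears in every hunk of the diff below: list_del() first, a conditional list_add_tail() onto a local sublist, then list_splice()/list_splice_init() back onto the main list once the loop is done.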
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -294,12 +294,16 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
 	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct sk_buff *skb, *next;
+	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
-		int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
-		if (ret != 1)
-			list_del(&skb->list);
+		list_del(&skb->list);
+		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+			list_add_tail(&skb->list, &sublist);
 	}
+	/* Put passed packets back on main list */
+	list_splice(&sublist, head);
 }
 
 /* Call setsockopt() */
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4830,23 +4830,28 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
 	struct list_head sublist;
 	struct sk_buff *skb, *next;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct net_device *orig_dev = skb->dev;
 		struct packet_type *pt_prev = NULL;
 
+		list_del(&skb->list);
 		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+		if (!pt_prev)
+			continue;
 		if (pt_curr != pt_prev || od_curr != orig_dev) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			pt_curr = pt_prev;
 			od_curr = orig_dev;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 
 	/* dispatch final sublist */
-	__netif_receive_skb_list_ptype(head, pt_curr, od_curr);
+	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
 }
 
 static int __netif_receive_skb(struct sk_buff *skb)
@@ -4982,25 +4987,30 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 {
 	struct bpf_prog *xdp_prog = NULL;
 	struct sk_buff *skb, *next;
+	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		net_timestamp_check(netdev_tstamp_prequeue, skb);
-		if (skb_defer_rx_timestamp(skb))
-			/* Handled, remove from list */
-			list_del(&skb->list);
+		list_del(&skb->list);
+		if (!skb_defer_rx_timestamp(skb))
+			list_add_tail(&skb->list, &sublist);
 	}
+	list_splice_init(&sublist, head);
 
 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
 		preempt_disable();
 		rcu_read_lock();
 		list_for_each_entry_safe(skb, next, head, list) {
 			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-			if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
-				/* Dropped, remove from list */
-				list_del(&skb->list);
+			list_del(&skb->list);
+			if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+				list_add_tail(&skb->list, &sublist);
 		}
 		rcu_read_unlock();
 		preempt_enable();
+		/* Put passed packets back on main list */
+		list_splice_init(&sublist, head);
 	}
 
 	rcu_read_lock();
@@ -5011,9 +5021,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 			if (cpu >= 0) {
-				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-				/* Handled, remove from list */
+				/* Will be handled, remove from list */
 				list_del(&skb->list);
+				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			}
 		}
 	}