Merge branch 'skb_list_walk_safe-refactoring'
Jason A. Donenfeld says:

====================
skb_list_walk_safe refactoring for net/*'s skb_gso_segment usage

This patchset adjusts all of the places in net/* that walk the segment list
returned by skb_gso_segment so that they use the new skb_list_walk_safe
helper. First we fix a minor bug in the helper macro that did not come up in
the previous patchset's uses. Then we adjust several cases throughout net/.
The xfrm changes were a bit hairy, but doable. Reading and thinking about the
code in mac80211 indicates a memory leak, which that commit addresses. All the
other cases were pretty trivial.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2b133adfcf
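For readers new to the helper, here is a minimal userspace sketch of the pattern every converted call site relies on. It is an illustration only: the toy struct node and list_walk_safe below stand in for struct sk_buff and the kernel macro and are not kernel code. The point is that the successor pointer is cached before the loop body runs, so the body may freely consume or free the current entry, which is exactly what happens to each GSO segment at these call sites.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct sk_buff: a singly linked list node. */
    struct node {
            int len;
            struct node *next;
    };

    /* Same shape as the kernel helper: the successor is cached up front, so
     * the loop body may free or unlink the current node without breaking
     * the walk. */
    #define list_walk_safe(first, pos, nxt)                                   \
            for ((pos) = (first), (nxt) = (pos) ? (pos)->next : NULL; (pos); \
                 (pos) = (nxt), (nxt) = (pos) ? (pos)->next : NULL)

    int main(void)
    {
            struct node *head = NULL, *pos, *nxt;

            /* Build a three-node list: 100 -> 200 -> 300. */
            for (int i = 3; i >= 1; i--) {
                    struct node *n = malloc(sizeof(*n));
                    n->len = i * 100;
                    n->next = head;
                    head = n;
            }

            /* Consume every node while walking; freeing pos inside a plain
             * "for (pos = head; pos; pos = pos->next)" would be a
             * use-after-free. */
            list_walk_safe(head, pos, nxt) {
                    printf("segment of %d bytes\n", pos->len);
                    free(pos);
            }
            return 0;
    }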
include/linux/skbuff.h
@@ -1479,9 +1479,9 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
 }
 
 /* Iterate through singly-linked GSO fragments of an skb. */
-#define skb_list_walk_safe(first, skb, next)                                  \
-        for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb);    \
-             (skb) = (next), (next) = (skb) ? (skb)->next : NULL)
+#define skb_list_walk_safe(first, skb, next_skb)                              \
+        for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
+             (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
 
 static inline void skb_list_del_init(struct sk_buff *skb)
 {
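The "minor bug" fixed by the hunk above is a preprocessor subtlety: the old third parameter was named next, which is also the name of the sk_buff member the macro dereferences, and macro arguments are substituted even after the -> operator. The hedged illustration below uses toy names (struct item, old_walk and new_walk are hypothetical, not the kernel definitions) to show why the rename to next_skb matters.

    struct item {
            struct item *next;
    };

    /* Old form: the third parameter is literally called "next", so the token
     * "next" in "(skb)->next" is treated as a macro argument rather than as
     * the struct member.  A call such as old_walk(head, cur, tmp) therefore
     * expands "(cur)->next" into "(cur)->tmp" and fails to compile; the
     * macro only worked when the caller's cursor variable happened to be
     * named "next". */
    #define old_walk(first, skb, next)                                        \
            for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \
                 (skb) = (next), (next) = (skb) ? (skb)->next : NULL)

    /* Fixed form, mirroring the hunk above: renaming the parameter to
     * "next_skb" leaves the member access "(skb)->next" alone, so any
     * caller-chosen variable names work, e.g. new_walk(head, cur, tmp). */
    #define new_walk(first, skb, next_skb)                                        \
            for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
                 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)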
net/ipv4/ip_output.c
@@ -240,8 +240,8 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
 static int ip_finish_output_gso(struct net *net, struct sock *sk,
                                 struct sk_buff *skb, unsigned int mtu)
 {
+        struct sk_buff *segs, *nskb;
         netdev_features_t features;
-        struct sk_buff *segs;
         int ret = 0;
 
         /* common case: seglen is <= mtu
@@ -272,8 +272,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 
         consume_skb(skb);
 
-        do {
-                struct sk_buff *nskb = segs->next;
+        skb_list_walk_safe(segs, segs, nskb) {
                 int err;
 
                 skb_mark_not_on_list(segs);
@@ -281,8 +280,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 
                 if (err && ret == 0)
                         ret = err;
-                segs = nskb;
-        } while (segs);
+        }
 
         return ret;
 }
net/ipv4/udp.c
@@ -2104,8 +2104,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                 BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
                 __skb_push(skb, -skb_mac_offset(skb));
                 segs = udp_rcv_segment(sk, skb, true);
-                for (skb = segs; skb; skb = next) {
-                        next = skb->next;
+                skb_list_walk_safe(segs, skb, next) {
                         __skb_pull(skb, skb_transport_offset(skb));
                         ret = udp_queue_rcv_one_skb(sk, skb);
                         if (ret > 0)
net/ipv6/udp.c
@@ -690,8 +690,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                 __skb_push(skb, -skb_mac_offset(skb));
                 segs = udp_rcv_segment(sk, skb, false);
-                for (skb = segs; skb; skb = next) {
-                        next = skb->next;
+                skb_list_walk_safe(segs, skb, next) {
                         __skb_pull(skb, skb_transport_offset(skb));
 
                         ret = udpv6_queue_rcv_one_skb(sk, skb);
net/mac80211/tx.c
@@ -3949,18 +3949,15 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                 }
         }
 
-        next = skb;
-        while (next) {
-                skb = next;
-                next = skb->next;
-
-                skb->prev = NULL;
-                skb->next = NULL;
+        skb_list_walk_safe(skb, skb, next) {
+                skb_mark_not_on_list(skb);
 
                 skb = ieee80211_build_hdr(sdata, skb, info_flags,
                                           sta, ctrl_flags);
-                if (IS_ERR(skb))
+                if (IS_ERR(skb)) {
+                        kfree_skb_list(next);
                         goto out;
+                }
 
                 ieee80211_tx_stats(dev, skb->len);
 
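The mac80211 hunk above is where the leak mentioned in the cover letter was: when ieee80211_build_hdr() failed partway through the segment list, the old code jumped to out and the unprocessed tail of the list was never freed; the added kfree_skb_list(next) releases it. Below is a minimal userspace sketch of that early-exit pattern, with hypothetical names (node, process_one, free_list) rather than the mac80211 code; free_list plays the role of kfree_skb_list().

    #include <stdlib.h>

    struct node {
            int len;
            struct node *next;
    };

    #define list_walk_safe(first, pos, nxt)                                   \
            for ((pos) = (first), (nxt) = (pos) ? (pos)->next : NULL; (pos); \
                 (pos) = (nxt), (nxt) = (pos) ? (pos)->next : NULL)

    /* Free a whole chain; the userspace analogue of kfree_skb_list(). */
    void free_list(struct node *n)
    {
            while (n) {
                    struct node *nxt = n->next;

                    free(n);
                    n = nxt;
            }
    }

    /* Hypothetical per-segment step: consumes its node and may fail. */
    int process_one(struct node *n)
    {
            int err = n->len > 1500 ? -1 : 0;

            free(n);
            return err;
    }

    int process_all(struct node *head)
    {
            struct node *pos, *nxt;

            list_walk_safe(head, pos, nxt) {
                    if (process_one(pos) < 0) {
                            /* Without this, the not-yet-processed tail
                             * starting at nxt would be leaked on the early
                             * exit -- the same leak the kfree_skb_list(next)
                             * above plugs. */
                            free_list(nxt);
                            return -1;
                    }
            }
            return 0;
    }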
net/netfilter/nfnetlink_queue.c
@@ -778,7 +778,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 {
         unsigned int queued;
         struct nfqnl_instance *queue;
-        struct sk_buff *skb, *segs;
+        struct sk_buff *skb, *segs, *nskb;
         int err = -ENOBUFS;
         struct net *net = entry->state.net;
         struct nfnl_queue_net *q = nfnl_queue_pernet(net);
@@ -815,8 +815,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
                 goto out_err;
         queued = 0;
         err = 0;
-        do {
-                struct sk_buff *nskb = segs->next;
+        skb_list_walk_safe(segs, segs, nskb) {
                 if (err == 0)
                         err = __nfqnl_enqueue_packet_gso(net, queue,
                                                          segs, entry);
@@ -824,8 +823,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
                         queued++;
                 else
                         kfree_skb(segs);
-                segs = nskb;
-        } while (segs);
+        }
 
         if (queued) {
                 if (err) /* some segments are already queued */
net/openvswitch/datapath.c
@@ -321,8 +321,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
         }
 
         /* Queue all of the segments. */
-        skb = segs;
-        do {
+        skb_list_walk_safe(segs, skb, nskb) {
                 if (gso_type & SKB_GSO_UDP && skb != segs)
                         key = &later_key;
 
@@ -330,17 +329,15 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                 if (err)
                         break;
 
-        } while ((skb = skb->next));
+        }
 
         /* Free all of the segments. */
-        skb = segs;
-        do {
-                nskb = skb->next;
+        skb_list_walk_safe(segs, skb, nskb) {
                 if (err)
                         kfree_skb(skb);
                 else
                         consume_skb(skb);
-        } while ((skb = nskb));
+        }
         return err;
 }
 
net/sched/sch_cake.c
@@ -1682,8 +1682,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                 if (IS_ERR_OR_NULL(segs))
                         return qdisc_drop(skb, sch, to_free);
 
-                while (segs) {
-                        nskb = segs->next;
+                skb_list_walk_safe(segs, segs, nskb) {
                         skb_mark_not_on_list(segs);
                         qdisc_skb_cb(segs)->pkt_len = segs->len;
                         cobalt_set_enqueue_time(segs, now);
@@ -1696,7 +1695,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         slen += segs->len;
                         q->buffer_used += segs->truesize;
                         b->packets++;
-                        segs = nskb;
                 }
 
                 /* stats */
net/sched/sch_tbf.c
@@ -155,8 +155,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
                 return qdisc_drop(skb, sch, to_free);
 
         nb = 0;
-        while (segs) {
-                nskb = segs->next;
+        skb_list_walk_safe(segs, segs, nskb) {
                 skb_mark_not_on_list(segs);
                 qdisc_skb_cb(segs)->pkt_len = segs->len;
                 len += segs->len;
@@ -167,7 +166,6 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
                 } else {
                         nb++;
                 }
-                segs = nskb;
         }
         sch->q.qlen += nb;
         if (nb > 1)
net/xfrm/xfrm_device.c
@@ -78,7 +78,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
         int err;
         unsigned long flags;
         struct xfrm_state *x;
-        struct sk_buff *skb2;
+        struct sk_buff *skb2, *nskb;
         struct softnet_data *sd;
         netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
@@ -148,11 +148,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                 return skb;
         }
 
-        skb2 = skb;
-
-        do {
-                struct sk_buff *nskb = skb2->next;
-
+        skb_list_walk_safe(skb, skb2, nskb) {
                 esp_features |= skb->dev->gso_partial_features;
                 skb_mark_not_on_list(skb2);
 
@@ -176,14 +172,11 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                         if (!skb)
                                 return NULL;
 
-                        goto skip_push;
+                        continue;
                 }
 
                 skb_push(skb2, skb2->data - skb_mac_header(skb2));
-
-skip_push:
-                skb2 = nskb;
-        } while (skb2);
+        }
 
         return skb;
 }
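The cover letter calls the xfrm changes "a bit hairy"; the hunks above show why: validate_xmit_xfrm() used a do/while loop with a skip_push label to bypass skb_push() for segments that were already handed off, and folding that into skb_list_walk_safe turns the goto into a plain continue while the macro takes over the manual skb2 = nskb advance. A rough userspace sketch of that shape of conversion follows (toy names, not the xfrm logic).

    struct node {
            int len;
            struct node *next;
    };

    #define list_walk_safe(first, pos, nxt)                                   \
            for ((pos) = (first), (nxt) = (pos) ? (pos)->next : NULL; (pos); \
                 (pos) = (nxt), (nxt) = (pos) ? (pos)->next : NULL)

    /* Old shape, roughly as in the pre-patch code:
     *
     *      cur = head;
     *      do {
     *              nxt = cur->next;
     *              if (already_handled(cur))
     *                      goto skip;
     *              push_header(cur);
     * skip:
     *              cur = nxt;
     *      } while (cur);
     *
     * New shape: the macro owns the cursor advance, so the label disappears
     * and the skip becomes a plain continue. */
    void push_all(struct node *head)
    {
            struct node *cur, *nxt;

            list_walk_safe(head, cur, nxt) {
                    if (cur->len == 0)      /* stand-in for "segment already handled" */
                            continue;       /* was: goto skip_push */
                    cur->len += 14;         /* stand-in for pushing a link-layer header */
            }
    }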
net/xfrm/xfrm_output.c
@@ -533,7 +533,7 @@ static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-        struct sk_buff *segs;
+        struct sk_buff *segs, *nskb;
 
         BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
         BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
@@ -544,8 +544,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
         if (segs == NULL)
                 return -EINVAL;
 
-        do {
-                struct sk_buff *nskb = segs->next;
+        skb_list_walk_safe(segs, segs, nskb) {
                 int err;
 
                 skb_mark_not_on_list(segs);
@@ -555,9 +554,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
                         kfree_skb_list(nskb);
                         return err;
                 }
-
-                segs = nskb;
-        } while (segs);
+        }
 
         return 0;
 }