skbuff: introduce skb_gso_validate_mtu
skb_gso_network_seglen is not enough for checking fragment sizes if the skb is using GSO_BY_FRAGS, as we have to check frag per frag. This patch introduces skb_gso_validate_mtu, based on the former; since all calls to skb_gso_network_seglen were there to validate whether the skb fits a given MTU, the new helper wraps that use case and improves the check.

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Tested-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ae7ef81ef0
parent 3953c46c3a
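Before the hunks, a minimal sketch of the caller pattern this change converges on (illustration only, not part of the commit; the helper name example_pkt_too_big is hypothetical, its body mirrors the converted callers below): instead of comparing skb_gso_network_seglen() against the MTU, a forwarding path asks skb_gso_validate_mtu(), which also handles GSO_BY_FRAGS skbs by checking every fragment.

#include <linux/skbuff.h>

/* Hypothetical caller, modelled on the call sites converted below. */
static bool example_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	/* Packets that already fit need no further checks. */
	if (skb->len <= mtu)
		return false;

	/* A GSO skb is only "too big" if some segment would still
	 * exceed the MTU once the skb is split.
	 */
	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}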
include/linux/skbuff.h
@@ -2992,6 +2992,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
net/core/skbuff.c
@@ -4392,6 +4392,37 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
 
+/**
+ * skb_gso_validate_mtu - Return in case such skb fits a given MTU
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
+ * once split.
+ */
+bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const struct sk_buff *iter;
+	unsigned int hlen;
+
+	hlen = skb_gso_network_seglen(skb);
+
+	if (shinfo->gso_size != GSO_BY_FRAGS)
+		return hlen <= mtu;
+
+	/* Undo this so we can re-use header sizes */
+	hlen -= GSO_BY_FRAGS;
+
+	skb_walk_frags(skb, iter) {
+		if (hlen + skb_headlen(iter) > mtu)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
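A note on the GSO_BY_FRAGS branch above (illustrative numbers, not part of the commit): for skbs marked GSO_BY_FRAGS (as used by SCTP GSO), gso_size carries the GSO_BY_FRAGS sentinel rather than a real segment size, and each skb on the frag list carries one segment's payload. skb_gso_network_seglen() therefore returns the header length plus the sentinel; subtracting GSO_BY_FRAGS leaves just the headers, and every fragment must then fit the MTU on its own:

/* Assumed example: IPv4 header (20 bytes) + SCTP common header (12 bytes),
 * MTU of 1500 bytes.
 *
 *   hlen  = skb_gso_network_seglen(skb)  ->  20 + 12 + GSO_BY_FRAGS
 *   hlen -= GSO_BY_FRAGS                 ->  32
 *
 * skb_walk_frags() then succeeds only if every fragment satisfies
 * 32 + skb_headlen(iter) <= 1500, i.e. each fragment's linear payload
 * is at most 1468 bytes.
 */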
net/ipv4/ip_forward.c
@@ -54,7 +54,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
 		return false;
 
 	return true;
net/ipv4/ip_output.c
@@ -225,7 +225,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 
 	/* common case: locally created skb or seglen is <= mtu */
 	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
-	      skb_gso_network_seglen(skb) <= mtu)
+	      skb_gso_validate_mtu(skb, mtu))
 		return ip_finish_output2(net, sk, skb);
 
 	/* Slowpath - GSO segment length is exceeding the dst MTU.
net/ipv6/ip6_output.c
@@ -368,7 +368,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
 		return false;
 
 	return true;
net/mpls/af_mpls.c
@@ -91,7 +91,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
 		return false;
 
 	return true;