bpf: add plumbing for BPF_LWT_ENCAP_IP in bpf_lwt_push_encap
This patch adds all needed plumbing in preparation for allowing bpf programs to do IP encapsulation via bpf_lwt_push_encap. The actual implementation is added in the next patch in the patchset. Of note: - bpf_lwt_push_encap can now be called from BPF_PROG_TYPE_LWT_XMIT prog types in addition to BPF_PROG_TYPE_LWT_IN; - if the skb being encapsulated has GSO set, encapsulation is limited to IPIP/IP+GRE/IP+GUE (both IPv4 and IPv6); - as route lookups are different for ingress vs egress, the single external bpf_lwt_push_encap BPF helper is routed internally to either bpf_lwt_in_push_encap or bpf_lwt_xmit_push_encap BPF_CALLs, depending on prog type. v8 changes: fixed a typo. Signed-off-by: Peter Oskolkov <posk@google.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
dd27c2e3d0
commit
3e0bd37ce0
|
@ -2016,6 +2016,19 @@ union bpf_attr {
|
||||||
* Only works if *skb* contains an IPv6 packet. Insert a
|
* Only works if *skb* contains an IPv6 packet. Insert a
|
||||||
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
|
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
|
||||||
* the IPv6 header.
|
* the IPv6 header.
|
||||||
|
* **BPF_LWT_ENCAP_IP**
|
||||||
|
* IP encapsulation (GRE/GUE/IPIP/etc). The outer header
|
||||||
|
* must be IPv4 or IPv6, followed by zero or more
|
||||||
|
* additional headers, up to LWT_BPF_MAX_HEADROOM total
|
||||||
|
* bytes in all prepended headers. Please note that
|
||||||
|
* if skb_is_gso(skb) is true, no more than two headers
|
||||||
|
* can be prepended, and the inner header, if present,
|
||||||
|
* should be either GRE or UDP/GUE.
|
||||||
|
*
|
||||||
|
* BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
|
||||||
|
* type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
|
||||||
|
* by bpf programs of types BPF_PROG_TYPE_LWT_IN and
|
||||||
|
* BPF_PROG_TYPE_LWT_XMIT.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlaying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
|
@ -2517,7 +2530,8 @@ enum bpf_hdr_start_off {
|
||||||
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
|
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
|
||||||
enum bpf_lwt_encap_mode {
|
enum bpf_lwt_encap_mode {
|
||||||
BPF_LWT_ENCAP_SEG6,
|
BPF_LWT_ENCAP_SEG6,
|
||||||
BPF_LWT_ENCAP_SEG6_INLINE
|
BPF_LWT_ENCAP_SEG6_INLINE,
|
||||||
|
BPF_LWT_ENCAP_IP,
|
||||||
};
|
};
|
||||||
|
|
||||||
#define __bpf_md_ptr(type, name) \
|
#define __bpf_md_ptr(type, name) \
|
||||||
|
@ -2606,7 +2620,15 @@ enum bpf_ret_code {
|
||||||
BPF_DROP = 2,
|
BPF_DROP = 2,
|
||||||
/* 3-6 reserved */
|
/* 3-6 reserved */
|
||||||
BPF_REDIRECT = 7,
|
BPF_REDIRECT = 7,
|
||||||
/* >127 are reserved for prog type specific return codes */
|
/* >127 are reserved for prog type specific return codes.
|
||||||
|
*
|
||||||
|
* BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
|
||||||
|
* BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
|
||||||
|
* changed and should be routed based on its new L3 header.
|
||||||
|
* (This is an L3 redirect, as opposed to L2 redirect
|
||||||
|
* represented by BPF_REDIRECT above).
|
||||||
|
*/
|
||||||
|
BPF_LWT_REROUTE = 128,
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_sock {
|
struct bpf_sock {
|
||||||
|
|
|
@ -4815,7 +4815,15 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_IPV6_SEG6_BPF */
|
#endif /* CONFIG_IPV6_SEG6_BPF */
|
||||||
|
|
||||||
BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
|
#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
|
||||||
|
static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
|
||||||
|
bool ingress)
|
||||||
|
{
|
||||||
|
return -EINVAL; /* Implemented in the next patch. */
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
|
||||||
u32, len)
|
u32, len)
|
||||||
{
|
{
|
||||||
switch (type) {
|
switch (type) {
|
||||||
|
@ -4823,14 +4831,41 @@ BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
|
||||||
case BPF_LWT_ENCAP_SEG6:
|
case BPF_LWT_ENCAP_SEG6:
|
||||||
case BPF_LWT_ENCAP_SEG6_INLINE:
|
case BPF_LWT_ENCAP_SEG6_INLINE:
|
||||||
return bpf_push_seg6_encap(skb, type, hdr, len);
|
return bpf_push_seg6_encap(skb, type, hdr, len);
|
||||||
|
#endif
|
||||||
|
#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
|
||||||
|
case BPF_LWT_ENCAP_IP:
|
||||||
|
return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
|
||||||
#endif
|
#endif
|
||||||
default:
|
default:
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
|
BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
|
||||||
.func = bpf_lwt_push_encap,
|
void *, hdr, u32, len)
|
||||||
|
{
|
||||||
|
switch (type) {
|
||||||
|
#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
|
||||||
|
case BPF_LWT_ENCAP_IP:
|
||||||
|
return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
|
||||||
|
#endif
|
||||||
|
default:
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
|
||||||
|
.func = bpf_lwt_in_push_encap,
|
||||||
|
.gpl_only = false,
|
||||||
|
.ret_type = RET_INTEGER,
|
||||||
|
.arg1_type = ARG_PTR_TO_CTX,
|
||||||
|
.arg2_type = ARG_ANYTHING,
|
||||||
|
.arg3_type = ARG_PTR_TO_MEM,
|
||||||
|
.arg4_type = ARG_CONST_SIZE
|
||||||
|
};
|
||||||
|
|
||||||
|
static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
|
||||||
|
.func = bpf_lwt_xmit_push_encap,
|
||||||
.gpl_only = false,
|
.gpl_only = false,
|
||||||
.ret_type = RET_INTEGER,
|
.ret_type = RET_INTEGER,
|
||||||
.arg1_type = ARG_PTR_TO_CTX,
|
.arg1_type = ARG_PTR_TO_CTX,
|
||||||
|
@ -5417,7 +5452,8 @@ bool bpf_helper_changes_pkt_data(void *func)
|
||||||
func == bpf_lwt_seg6_adjust_srh ||
|
func == bpf_lwt_seg6_adjust_srh ||
|
||||||
func == bpf_lwt_seg6_action ||
|
func == bpf_lwt_seg6_action ||
|
||||||
#endif
|
#endif
|
||||||
func == bpf_lwt_push_encap)
|
func == bpf_lwt_in_push_encap ||
|
||||||
|
func == bpf_lwt_xmit_push_encap)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
|
@ -5815,7 +5851,7 @@ lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||||
{
|
{
|
||||||
switch (func_id) {
|
switch (func_id) {
|
||||||
case BPF_FUNC_lwt_push_encap:
|
case BPF_FUNC_lwt_push_encap:
|
||||||
return &bpf_lwt_push_encap_proto;
|
return &bpf_lwt_in_push_encap_proto;
|
||||||
default:
|
default:
|
||||||
return lwt_out_func_proto(func_id, prog);
|
return lwt_out_func_proto(func_id, prog);
|
||||||
}
|
}
|
||||||
|
@ -5851,6 +5887,8 @@ lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||||
return &bpf_l4_csum_replace_proto;
|
return &bpf_l4_csum_replace_proto;
|
||||||
case BPF_FUNC_set_hash_invalid:
|
case BPF_FUNC_set_hash_invalid:
|
||||||
return &bpf_set_hash_invalid_proto;
|
return &bpf_set_hash_invalid_proto;
|
||||||
|
case BPF_FUNC_lwt_push_encap:
|
||||||
|
return &bpf_lwt_xmit_push_encap_proto;
|
||||||
default:
|
default:
|
||||||
return lwt_out_func_proto(func_id, prog);
|
return lwt_out_func_proto(func_id, prog);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue