Merge branch 'sk_filter-trim-limit'
Willem de Bruijn says:

====================
limit sk_filter trim to payload

Sockets can apply a filter to incoming packets to drop or trim them.
Fix two codepaths that call skb_pull/__skb_pull after sk_filter
without checking for packet length.

Reading beyond skb->tail after trimming happens in more codepaths, but
safety of reading in the linear segment is based on minimum allocation
size (MAX_HEADER, GRO_MAX_HEAD, ..).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This change is contained in commit 790e5ef59f.
|
@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
|||
}
|
||||
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
|
||||
|
||||
int sk_filter(struct sock *sk, struct sk_buff *skb);
|
||||
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
|
||||
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
return sk_filter_trim_cap(sk, skb, 1);
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
|
||||
void bpf_prog_free(struct bpf_prog *fp);
|
||||
|
|
|
@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
|
|||
*/
|
||||
void sock_gen_put(struct sock *sk);
|
||||
|
||||
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
|
||||
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
|
||||
unsigned int trim_cap);
|
||||
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
|
||||
const int nested)
|
||||
{
|
||||
return __sk_receive_skb(sk, skb, nested, 1);
|
||||
}
|
||||
|
||||
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
|
||||
{
|
||||
|
|
|
@ -53,9 +53,10 @@
|
|||
#include <net/sock_reuseport.h>
|
||||
|
||||
/**
|
||||
* sk_filter - run a packet through a socket filter
|
||||
* sk_filter_trim_cap - run a packet through a socket filter
|
||||
* @sk: sock associated with &sk_buff
|
||||
* @skb: buffer to filter
|
||||
* @cap: limit on how short the eBPF program may trim the packet
|
||||
*
|
||||
* Run the eBPF program and then cut skb->data to correct size returned by
|
||||
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
|
||||
|
@ -64,7 +65,7 @@
|
|||
* be accepted or -EPERM if the packet should be tossed.
|
||||
*
|
||||
*/
|
||||
int sk_filter(struct sock *sk, struct sk_buff *skb)
|
||||
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
|
||||
{
|
||||
int err;
|
||||
struct sk_filter *filter;
|
||||
|
@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
|
|||
filter = rcu_dereference(sk->sk_filter);
|
||||
if (filter) {
|
||||
unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
|
||||
|
||||
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
|
||||
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(sk_filter);
|
||||
EXPORT_SYMBOL(sk_filter_trim_cap);
|
||||
|
||||
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
|
||||
{
|
||||
|
|
|
@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|||
}
|
||||
EXPORT_SYMBOL(sock_queue_rcv_skb);
|
||||
|
||||
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
|
||||
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
|
||||
const int nested, unsigned int trim_cap)
|
||||
{
|
||||
int rc = NET_RX_SUCCESS;
|
||||
|
||||
if (sk_filter(sk, skb))
|
||||
if (sk_filter_trim_cap(sk, skb, trim_cap))
|
||||
goto discard_and_relse;
|
||||
|
||||
skb->dev = NULL;
|
||||
|
@ -492,7 +493,7 @@ discard_and_relse:
|
|||
kfree_skb(skb);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL(sk_receive_skb);
|
||||
EXPORT_SYMBOL(__sk_receive_skb);
|
||||
|
||||
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
|
||||
{
|
||||
|
|
|
@ -868,7 +868,7 @@ lookup:
|
|||
goto discard_and_relse;
|
||||
nf_reset(skb);
|
||||
|
||||
return sk_receive_skb(sk, skb, 1);
|
||||
return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
|
||||
|
||||
no_dccp_socket:
|
||||
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
|
||||
|
|
|
@ -732,7 +732,7 @@ lookup:
|
|||
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
|
||||
goto discard_and_relse;
|
||||
|
||||
return sk_receive_skb(sk, skb, 1) ? -1 : 0;
|
||||
return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
|
||||
|
||||
no_dccp_socket:
|
||||
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
|
||||
|
|
|
@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
|
|||
rose_frames_acked(sk, nr);
|
||||
if (ns == rose->vr) {
|
||||
rose_start_idletimer(sk);
|
||||
if (sock_queue_rcv_skb(sk, skb) == 0) {
|
||||
if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
|
||||
__sock_queue_rcv_skb(sk, skb) == 0) {
|
||||
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
|
||||
queued = 1;
|
||||
} else {
|
||||
|
|
Loading…
Reference in New Issue