netfilter: nf_queue: fix possible use-after-free
Eric Dumazet says:
The sock_hold() side seems suspect, because there is no guarantee
that sk_refcnt is not already 0.
On failure, we cannot queue the packet and need to indicate an
error. The packet will be dropped by the caller.
v2: split skb prefetch hunk into separate change
Fixes: 271b72c7fa ("udp: RCU handling for Unicast packets.")
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
This commit is contained in:
parent
2e78855d31
commit
c387307024
|
@ -37,7 +37,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh);
|
|||
void nf_unregister_queue_handler(void);
|
||||
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
|
||||
|
||||
void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
|
||||
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
|
||||
void nf_queue_entry_free(struct nf_queue_entry *entry);
|
||||
|
||||
static inline void init_hashrandom(u32 *jhash_initval)
|
||||
|
|
|
@ -96,19 +96,21 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
|
|||
}
|
||||
|
||||
/* Bump dev refs so they don't vanish while packet is out */
|
||||
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
|
||||
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
|
||||
{
|
||||
struct nf_hook_state *state = &entry->state;
|
||||
|
||||
if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
|
||||
return false;
|
||||
|
||||
dev_hold(state->in);
|
||||
dev_hold(state->out);
|
||||
if (state->sk)
|
||||
sock_hold(state->sk);
|
||||
|
||||
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
||||
dev_hold(entry->physin);
|
||||
dev_hold(entry->physout);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
|
||||
|
||||
|
@ -196,7 +198,10 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
|
|||
|
||||
__nf_queue_entry_init_physdevs(entry);
|
||||
|
||||
nf_queue_entry_get_refs(entry);
|
||||
if (!nf_queue_entry_get_refs(entry)) {
|
||||
kfree(entry);
|
||||
return -ENOTCONN;
|
||||
}
|
||||
|
||||
switch (entry->state.pf) {
|
||||
case AF_INET:
|
||||
|
|
|
@ -710,9 +710,15 @@ static struct nf_queue_entry *
|
|||
nf_queue_entry_dup(struct nf_queue_entry *e)
|
||||
{
|
||||
struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
|
||||
if (entry)
|
||||
nf_queue_entry_get_refs(entry);
|
||||
return entry;
|
||||
|
||||
if (!entry)
|
||||
return NULL;
|
||||
|
||||
if (nf_queue_entry_get_refs(entry))
|
||||
return entry;
|
||||
|
||||
kfree(entry);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
||||
|
|
Loading…
Reference in New Issue