OpenCloudOS-Kernel/drivers/net/gtp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
*
* (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
* (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Author: Harald Welte <hwelte@sysmocom.de>
* Pablo Neira Ayuso <pablo@netfilter.org>
* Andreas Schultz <aschultz@travelping.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>
/* An active session for the subscriber. */
struct pdp_ctx {
struct hlist_node hlist_tid;
struct hlist_node hlist_addr;
union {
struct {
u64 tid;
u16 flow;
} v0;
struct {
u32 i_tei;
u32 o_tei;
} v1;
} u;
u8 gtp_version;
u16 af;
struct in_addr ms_addr_ip4;
struct in_addr peer_addr_ip4;
struct sock *sk;
struct net_device *dev;
atomic_t tx_seq;
struct rcu_head rcu_head;
};
/* One instance of the GTP device. */
struct gtp_dev {
struct list_head list;
struct sock *sk0;
struct sock *sk1u;
u8 sk_created;
struct net_device *dev;
struct net *net;
unsigned int role;
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
u8 restart_count;
};
struct echo_info {
struct in_addr ms_addr_ip4;
struct in_addr peer_addr_ip4;
u8 gtp_version;
};
static unsigned int gtp_net_id __read_mostly;
struct gtp_net {
struct list_head gtp_dev_list;
};
static u32 gtp_h_initval;
static struct genl_family gtp_genl_family;
enum gtp_multicast_groups {
GTP_GENL_MCGRP,
};
static const struct genl_multicast_group gtp_genl_mcgrps[] = {
[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};
static void pdp_context_delete(struct pdp_ctx *pctx);
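/* PDP contexts are kept in two hash tables: tid_hash, keyed by the GTPv0
* TID or GTPv1 TEI, and addr_hash, keyed by the IPv4 address of the mobile
* subscriber. The helpers below compute the bucket index for each key.
*/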
static inline u32 gtp0_hashfn(u64 tid)
{
u32 *tid32 = (u32 *) &tid;
return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}
static inline u32 gtp1u_hashfn(u32 tid)
{
return jhash_1word(tid, gtp_h_initval);
}
static inline u32 ipv4_hashfn(__be32 ip)
{
return jhash_1word((__force u32)ip, gtp_h_initval);
}
/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
if (pdp->gtp_version == GTP_V0 &&
pdp->u.v0.tid == tid)
return pdp;
}
return NULL;
}
/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
if (pdp->gtp_version == GTP_V1 &&
pdp->u.v1.i_tei == tid)
return pdp;
}
return NULL;
}
/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
if (pdp->af == AF_INET &&
pdp->ms_addr_ip4.s_addr == ms_addr)
return pdp;
}
return NULL;
}
static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
unsigned int hdrlen, unsigned int role)
{
struct iphdr *iph;
if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
return false;
iph = (struct iphdr *)(skb->data + hdrlen);
if (role == GTP_ROLE_SGSN)
return iph->daddr == pctx->ms_addr_ip4.s_addr;
else
return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
/* Check if the inner IP address in this packet is assigned to any
* existing mobile subscriber.
*/
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
unsigned int hdrlen, unsigned int role)
{
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
}
return false;
}
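/* Deliver a decapsulated T-PDU to the GTP net device: verify that the inner
* IP address belongs to this PDP context, strip the UDP and GTP headers and
* hand the packet to the stack via the gtp device.
*/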
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
unsigned int hdrlen, unsigned int role)
{
if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
return 1;
}
/* Get rid of the GTP + UDP headers. */
if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
!net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
pctx->dev->stats.rx_length_errors++;
goto err;
}
netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
/* Now that the UDP and the GTP header have been removed, set up the
* new network header. This is required by the upper layer to
* calculate the transport header.
*/
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
skb->dev = pctx->dev;
dev_sw_netstats_rx_add(pctx->dev, skb->len);
__netif_rx(skb);
return 0;
err:
pctx->dev->stats.rx_dropped++;
return -1;
}
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
const struct sock *sk,
__be32 daddr, __be32 saddr)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->flowi4_tos = RT_CONN_FLAGS(sk);
fl4->flowi4_proto = sk->sk_protocol;
return ip_route_output_key(sock_net(sk), fl4);
}
/* GSM TS 09.60. 7.3
* In all Path Management messages:
* - TID: is not used and shall be set to 0.
* - Flow Label is not used and shall be set to 0
* In signalling messages:
* - number: this field is not yet used in signalling messages.
* It shall be set to 255 by the sender and shall be ignored
* by the receiver
* Returns true if the echo req was correct, false otherwise.
*/
static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
{
return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
gtp0->number != 0xff || gtp0->flow);
}
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
{
int len_pkt, len_hdr;
hdr->flags = 0x1e; /* v0, GTP-non-prime. */
hdr->type = msg_type;
/* GSM TS 09.60. 7.3 In all Path Management messages Flow Label and
* TID are not used and shall be set to 0.
*/
hdr->flow = 0;
hdr->tid = 0;
hdr->number = 0xff;
hdr->spare[0] = 0xff;
hdr->spare[1] = 0xff;
hdr->spare[2] = 0xff;
len_pkt = sizeof(struct gtp0_packet);
len_hdr = sizeof(struct gtp0_header);
if (msg_type == GTP_ECHO_RSP)
hdr->length = htons(len_pkt - len_hdr);
else
hdr->length = 0;
}
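/* Reply to a GTPv0 Echo Request received on a kernel-created socket: reuse
* the request skb, copy its sequence number into the response and report the
* local Restart Counter in the Recovery IE (GSM TS 09.60 7.3).
*/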
static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp0_packet *gtp_pkt;
struct gtp0_header *gtp0;
struct rtable *rt;
struct flowi4 fl4;
struct iphdr *iph;
__be16 seq;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
if (!gtp0_validate_echo_hdr(gtp0))
return -1;
seq = gtp0->seq;
/* pull GTP and UDP headers */
skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
/* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
* message shall be copied from the signalling request message
* that the GSN is replying to.
*/
gtp_pkt->gtp0_h.seq = seq;
gtp_pkt->ie.tag = GTPIE_RECOVERY;
gtp_pkt->ie.val = gtp->restart_count;
iph = ip_hdr(skb);
/* find route to the sender,
* src address becomes dst address and vice versa.
*/
rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
if (IS_ERR(rt)) {
netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
&iph->saddr);
return -1;
}
udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
fl4.saddr, fl4.daddr,
iph->tos,
ip4_dst_hoplimit(&rt->dst),
0,
htons(GTP0_PORT), htons(GTP0_PORT),
!net_eq(sock_net(gtp->sk0),
dev_net(gtp->dev)),
false);
return 0;
}
static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct echo_info echo)
{
void *genlh;
genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
type);
if (!genlh)
goto failure;
if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
goto failure;
genlmsg_end(skb, genlh);
return 0;
failure:
genlmsg_cancel(skb, genlh);
return -EMSGSIZE;
}
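/* A GTPv0 Echo Response arrived on a kernel-created socket: notify userspace
* via the GTP generic netlink multicast group, reporting the peer and MS
* addresses of the path.
*/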
static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp0_header *gtp0;
struct echo_info echo;
struct sk_buff *msg;
struct iphdr *iph;
int ret;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
if (!gtp0_validate_echo_hdr(gtp0))
return -1;
iph = ip_hdr(skb);
echo.ms_addr_ip4.s_addr = iph->daddr;
echo.peer_addr_ip4.s_addr = iph->saddr;
echo.gtp_version = GTP_V0;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
struct pdp_ctx *pctx;
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
if ((gtp0->flags >> 5) != GTP_V0)
return 1;
/* If the sockets were created in the kernel, there is no daemon
* running in userspace which would handle the echo request.
*/
if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
return gtp0_send_echo_resp(gtp, skb);
if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
return gtp0_handle_echo_resp(gtp, skb);
if (gtp0->type != GTP_TPDU)
return 1;
pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
return 1;
}
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
{
int len_pkt, len_hdr;
/* S flag must be set to 1 */
hdr->flags = 0x32; /* v1, GTP-non-prime. */
hdr->type = msg_type;
/* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
hdr->tid = 0;
/* seq, npdu and next are counted as part of the length of the GTP
* packet; that's why the size of gtp1_header is subtracted here,
* not the size of gtp1_header_long.
*/
len_hdr = sizeof(struct gtp1_header);
if (msg_type == GTP_ECHO_RSP) {
len_pkt = sizeof(struct gtp1u_packet);
hdr->length = htons(len_pkt - len_hdr);
} else {
/* GTP_ECHO_REQ does not carry a GTP Information Element,
* which is why gtp1_header_long is used here.
*/
len_pkt = sizeof(struct gtp1_header_long);
hdr->length = htons(len_pkt - len_hdr);
}
}
static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp1_header_long *gtp1u;
struct gtp1u_packet *gtp_pkt;
struct rtable *rt;
struct flowi4 fl4;
struct iphdr *iph;
gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
* Error Indication and Supported Extension Headers Notification
* messages, the S flag shall be set to 1 and TEID shall be set to 0.
*/
if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
return -1;
/* pull GTP and UDP headers */
skb_pull_data(skb,
sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
/* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
* Recovery information element shall not be used, i.e. it shall
* be set to zero by the sender and shall be ignored by the receiver.
* The Recovery information element is mandatory due to backwards
* compatibility reasons.
*/
gtp_pkt->ie.tag = GTPIE_RECOVERY;
gtp_pkt->ie.val = 0;
iph = ip_hdr(skb);
/* find route to the sender,
* src address becomes dst address and vice versa.
*/
rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
if (IS_ERR(rt)) {
netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
&iph->saddr);
return -1;
}
udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
fl4.saddr, fl4.daddr,
iph->tos,
ip4_dst_hoplimit(&rt->dst),
0,
htons(GTP1U_PORT), htons(GTP1U_PORT),
!net_eq(sock_net(gtp->sk1u),
dev_net(gtp->dev)),
false);
return 0;
}
static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp1_header_long *gtp1u;
struct echo_info echo;
struct sk_buff *msg;
struct iphdr *iph;
int ret;
gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
* Error Indication and Supported Extension Headers Notification
* messages, the S flag shall be set to 1 and TEID shall be set to 0.
*/
if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
return -1;
iph = ip_hdr(skb);
echo.ms_addr_ip4.s_addr = iph->daddr;
echo.peer_addr_ip4.s_addr = iph->saddr;
echo.gtp_version = GTP_V1;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
struct pdp_ctx *pctx;
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
if ((gtp1->flags >> 5) != GTP_V1)
return 1;
/* If the sockets were created in the kernel, there is no daemon
* running in userspace which would handle the echo request.
*/
if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
return gtp1u_send_echo_resp(gtp, skb);
if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
return gtp1u_handle_echo_resp(gtp, skb);
if (gtp1->type != GTP_TPDU)
return 1;
/* From 29.060: "This field shall be present if and only if any one or
* more of the S, PN and E flags are set.".
*
* If any of these bits is set, then the optional 4-byte field
* (sequence number, N-PDU number and next extension header type)
* is present.
*/
if (gtp1->flags & GTP1_F_MASK)
hdrlen += 4;
/* Make sure the header is large enough, including extensions. */
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
return 1;
}
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
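/* Detach a UDP encap socket from its GTP device: under lock_sock(), clear
* the matching gtp->sk0/sk1u pointer and sk_user_data, then drop the socket
* reference taken when encapsulation was enabled.
*/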
static void __gtp_encap_destroy(struct sock *sk)
{
struct gtp_dev *gtp;
lock_sock(sk);
gtp = sk->sk_user_data;
if (gtp) {
if (gtp->sk0 == sk)
gtp->sk0 = NULL;
else
gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0;
rcu_assign_sk_user_data(sk, NULL);
sock_put(sk);
}
release_sock(sk);
}
static void gtp_encap_destroy(struct sock *sk)
{
rtnl_lock();
__gtp_encap_destroy(sk);
rtnl_unlock();
}
static void gtp_encap_disable_sock(struct sock *sk)
{
if (!sk)
return;
__gtp_encap_destroy(sk);
}
static void gtp_encap_disable(struct gtp_dev *gtp)
{
if (gtp->sk_created) {
udp_tunnel_sock_release(gtp->sk0->sk_socket);
udp_tunnel_sock_release(gtp->sk1u->sk_socket);
gtp->sk_created = false;
gtp->sk0 = NULL;
gtp->sk1u = NULL;
} else {
gtp_encap_disable_sock(gtp->sk0);
gtp_encap_disable_sock(gtp->sk1u);
}
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
* Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
*/
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct gtp_dev *gtp;
int ret = 0;
gtp = rcu_dereference_sk_user_data(sk);
if (!gtp)
return 1;
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
netdev_dbg(gtp->dev, "received GTP0 packet\n");
ret = gtp0_udp_encap_recv(gtp, skb);
break;
case UDP_ENCAP_GTP1U:
netdev_dbg(gtp->dev, "received GTP1U packet\n");
ret = gtp1u_udp_encap_recv(gtp, skb);
break;
default:
ret = -1; /* Shouldn't happen. */
}
switch (ret) {
case 1:
netdev_dbg(gtp->dev, "pass up to the process\n");
break;
case 0:
break;
case -1:
netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
kfree_skb(skb);
ret = 0;
break;
}
return ret;
}
static int gtp_dev_init(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
gtp->dev = dev;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
return 0;
}
static void gtp_dev_uninit(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
gtp_encap_disable(gtp);
free_percpu(dev->tstats);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
int payload_len = skb->len;
struct gtp0_header *gtp0;
gtp0 = skb_push(skb, sizeof(*gtp0));
gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
gtp0->type = GTP_TPDU;
gtp0->length = htons(payload_len);
gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
gtp0->flow = htons(pctx->u.v0.flow);
gtp0->number = 0xff;
gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
}
static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
int payload_len = skb->len;
struct gtp1_header *gtp1;
gtp1 = skb_push(skb, sizeof(*gtp1));
/* Bits 8 7 6 5 4 3 2 1
* +--+--+--+--+--+--+--+--+
* |version |PT| 0| E| S|PN|
* +--+--+--+--+--+--+--+--+
* 0 0 1 1 1 0 0 0
*/
gtp1->flags = 0x30; /* v1, GTP-non-prime. */
gtp1->type = GTP_TPDU;
gtp1->length = htons(payload_len);
gtp1->tid = htonl(pctx->u.v1.o_tei);
/* TODO: Support for extension header, sequence number and N-PDU.
* Update the length field if any of them is available.
*/
}
struct gtp_pktinfo {
struct sock *sk;
struct iphdr *iph;
struct flowi4 fl4;
struct rtable *rt;
struct pdp_ctx *pctx;
struct net_device *dev;
__be16 gtph_port;
};
static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
switch (pktinfo->pctx->gtp_version) {
case GTP_V0:
pktinfo->gtph_port = htons(GTP0_PORT);
gtp0_push_header(skb, pktinfo->pctx);
break;
case GTP_V1:
pktinfo->gtph_port = htons(GTP1U_PORT);
gtp1_push_header(skb, pktinfo->pctx);
break;
}
}
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
struct sock *sk, struct iphdr *iph,
struct pdp_ctx *pctx, struct rtable *rt,
struct flowi4 *fl4,
struct net_device *dev)
{
pktinfo->sk = sk;
pktinfo->iph = iph;
pktinfo->pctx = pctx;
pktinfo->rt = rt;
pktinfo->fl4 = *fl4;
pktinfo->dev = dev;
}
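/* Prepare an outgoing IPv4 packet for GTP encapsulation: look up the PDP
* context (by inner source address in SGSN role, destination address
* otherwise), route to the peer GSN, update the path MTU and prepend the
* GTP header. The caller performs the UDP tunnel transmit from pktinfo.
*/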
static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
struct gtp_pktinfo *pktinfo)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct pdp_ctx *pctx;
struct rtable *rt;
struct flowi4 fl4;
struct iphdr *iph;
__be16 df;
int mtu;
/* Read the IP destination address and resolve the PDP context.
* Prepend PDP header with TEI/TID from PDP ctx.
*/
iph = ip_hdr(skb);
if (gtp->role == GTP_ROLE_SGSN)
pctx = ipv4_pdp_find(gtp, iph->saddr);
else
pctx = ipv4_pdp_find(gtp, iph->daddr);
if (!pctx) {
netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
&iph->daddr);
return -ENOENT;
}
netdev_dbg(dev, "found PDP context %p\n", pctx);
rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
inet_sk(pctx->sk)->inet_saddr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
&pctx->peer_addr_ip4.s_addr);
dev->stats.tx_carrier_errors++;
goto err;
}
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to SSGN %pI4\n",
&pctx->peer_addr_ip4.s_addr);
dev->stats.collisions++;
goto err_rt;
}
/* This is similar to tnl_update_pmtu(). */
df = iph->frag_off;
if (df) {
mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
sizeof(struct iphdr) - sizeof(struct udphdr);
switch (pctx->gtp_version) {
case GTP_V0:
mtu -= sizeof(struct gtp0_header);
break;
case GTP_V1:
mtu -= sizeof(struct gtp1_header);
break;
}
} else {
mtu = dst_mtu(&rt->dst);
}
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
goto err_rt;
}
gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
gtp_push_header(skb, pktinfo);
return 0;
err_rt:
ip_rt_put(rt);
err:
return -EBADMSG;
}
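/* Transmit handler of the gtp net device: build the GTP encapsulation for
* the outgoing IPv4 packet and send it through the UDP tunnel socket of the
* matching PDP context.
*/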
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int proto = ntohs(skb->protocol);
struct gtp_pktinfo pktinfo;
int err;
/* Ensure there is sufficient headroom. */
if (skb_cow_head(skb, dev->needed_headroom))
goto tx_err;
skb_reset_inner_headers(skb);
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
rcu_read_lock();
switch (proto) {
case ETH_P_IP:
err = gtp_build_skb_ip4(skb, dev, &pktinfo);
break;
default:
err = -EOPNOTSUPP;
break;
}
rcu_read_unlock();
if (err < 0)
goto tx_err;
switch (proto) {
case ETH_P_IP:
netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
&pktinfo.iph->saddr, &pktinfo.iph->daddr);
udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
pktinfo.fl4.saddr, pktinfo.fl4.daddr,
pktinfo.iph->tos,
ip4_dst_hoplimit(&pktinfo.rt->dst),
0,
pktinfo.gtph_port, pktinfo.gtph_port,
!net_eq(sock_net(pktinfo.pctx->sk),
dev_net(dev)),
false);
break;
}
return NETDEV_TX_OK;
tx_err:
dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static const struct net_device_ops gtp_netdev_ops = {
.ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
.ndo_get_stats64 = dev_get_tstats64,
};
static const struct device_type gtp_type = {
.name = "gtp",
};
static void gtp_link_setup(struct net_device *dev)
{
unsigned int max_gtp_header_len = sizeof(struct iphdr) +
sizeof(struct udphdr) +
sizeof(struct gtp0_header);
dev->netdev_ops = &gtp_netdev_ops;
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &gtp_type);
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->mtu = ETH_DATA_LEN - max_gtp_header_len;
/* Zero header length. */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_flags |= IFF_NO_QUEUE;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static void gtp_destructor(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}
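/* Create a kernel-owned UDP tunnel socket bound to GTP0_PORT or GTP1U_PORT
* and install gtp_encap_recv() as its encapsulation receive handler.
*/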
static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
{
struct udp_tunnel_sock_cfg tuncfg = {};
struct udp_port_cfg udp_conf = {
.local_ip.s_addr = htonl(INADDR_ANY),
.family = AF_INET,
};
struct net *net = gtp->net;
struct socket *sock;
int err;
if (type == UDP_ENCAP_GTP0)
udp_conf.local_udp_port = htons(GTP0_PORT);
else if (type == UDP_ENCAP_GTP1U)
udp_conf.local_udp_port = htons(GTP1U_PORT);
else
return ERR_PTR(-EINVAL);
err = udp_sock_create(net, &udp_conf, &sock);
if (err)
return ERR_PTR(err);
tuncfg.sk_user_data = gtp;
tuncfg.encap_type = type;
tuncfg.encap_rcv = gtp_encap_recv;
tuncfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tuncfg);
return sock->sk;
}
static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
if (IS_ERR(sk0))
return PTR_ERR(sk0);
sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
udp_tunnel_sock_release(sk0->sk_socket);
return PTR_ERR(sk1u);
}
gtp->sk_created = true;
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
return 0;
}
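/* rtnl newlink handler: parse the IFLA_GTP_* attributes, allocate the PDP
* hash tables, attach (or create) the UDP sockets and register the netdev.
*/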
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
int hashsize, err;
gtp = netdev_priv(dev);
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
if (!hashsize)
hashsize = 1024;
}
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN)
return -EINVAL;
}
gtp->role = role;
if (!data[IFLA_GTP_RESTART_COUNT])
gtp->restart_count = 0;
else
gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
gtp->net = src_net;
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
return err;
if (data[IFLA_GTP_CREATE_SOCKETS])
err = gtp_create_sockets(gtp, data);
else
err = gtp_encap_enable(gtp, data);
if (err < 0)
goto out_hashtable;
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
goto out_encap;
}
gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
return 0;
out_encap:
gtp_encap_disable(gtp);
out_hashtable:
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
return err;
}
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
}
static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD0] = { .type = NLA_U32 },
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
[IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
[IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (!data)
return -EINVAL;
return 0;
}
static size_t gtp_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
.kind = "gtp",
.maxtype = IFLA_GTP_MAX,
.policy = gtp_policy,
.priv_size = sizeof(struct gtp_dev),
.setup = gtp_link_setup,
.validate = gtp_validate,
.newlink = gtp_newlink,
.dellink = gtp_dellink,
.get_size = gtp_get_size,
.fill_info = gtp_fill_info,
};
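/* Allocate the per-device TID and MS-address hash tables.  The size is
 * supplied by userspace, so __GFP_NOWARN keeps an oversized request
 * from triggering an allocation warning.
 */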
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
int i;
gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
GFP_KERNEL | __GFP_NOWARN);
if (gtp->addr_hash == NULL)
return -ENOMEM;
gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
GFP_KERNEL | __GFP_NOWARN);
if (gtp->tid_hash == NULL)
goto err1;
gtp->hash_size = hsize;
for (i = 0; i < hsize; i++) {
INIT_HLIST_HEAD(&gtp->addr_hash[i]);
INIT_HLIST_HEAD(&gtp->tid_hash[i]);
}
return 0;
err1:
kfree(gtp->addr_hash);
return -ENOMEM;
}
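/* Turn a userspace-provided socket fd into a GTP encapsulation socket:
 * it must be an unused AF_INET/AF_INET6 UDP datagram socket, and is
 * then handed to the UDP tunnel infrastructure with our receive and
 * destroy callbacks.
 */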
static struct sock *gtp_encap_enable_socket(int fd, int type,
struct gtp_dev *gtp)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct socket *sock;
struct sock *sk;
int err;
pr_debug("enable gtp on %d, %d\n", fd, type);
sock = sockfd_lookup(fd, &err);
if (!sock) {
pr_debug("gtp socket fd=%d not found\n", fd);
return NULL;
}
sk = sock->sk;
if (sk->sk_protocol != IPPROTO_UDP ||
sk->sk_type != SOCK_DGRAM ||
(sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
pr_debug("socket fd=%d not UDP\n", fd);
sk = ERR_PTR(-EINVAL);
goto out_sock;
}
lock_sock(sk);
if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_rel_sock;
}
sock_hold(sk);
tuncfg.sk_user_data = gtp;
tuncfg.encap_type = type;
tuncfg.encap_rcv = gtp_encap_recv;
tuncfg.encap_destroy = gtp_encap_destroy;
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
out_rel_sock:
release_sock(sock->sk);
out_sock:
sockfd_put(sock);
return sk;
}
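/* Enable GTPv0-U and/or GTPv1-U encapsulation on the sockets passed in
 * via IFLA_GTP_FD0 and IFLA_GTP_FD1.
 */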
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
return -EINVAL;
if (data[IFLA_GTP_FD0]) {
u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
if (IS_ERR(sk0))
return PTR_ERR(sk0);
}
if (data[IFLA_GTP_FD1]) {
u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
gtp_encap_disable_sock(sk0);
return PTR_ERR(sk1u);
}
}
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
return 0;
}
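/* Resolve the GTPA_LINK/GTPA_NET_NS_FD attributes to a GTP device. */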
static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
struct gtp_dev *gtp = NULL;
struct net_device *dev;
struct net *net;
/* Examine the link attributes and figure out which network namespace
* we are talking about.
*/
if (nla[GTPA_NET_NS_FD])
net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
else
net = get_net(src_net);
if (IS_ERR(net))
return NULL;
/* Check if there's an existing gtpX device to configure */
dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
if (dev && dev->netdev_ops == &gtp_netdev_ops)
gtp = netdev_priv(dev);
put_net(net);
return gtp;
}
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
pctx->af = AF_INET;
pctx->peer_addr_ip4.s_addr =
nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
pctx->ms_addr_ip4.s_addr =
nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
switch (pctx->gtp_version) {
case GTP_V0:
/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
* label needs to be the same for uplink and downlink packets,
* so let's annotate this.
*/
pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
break;
case GTP_V1:
pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
break;
default:
break;
}
}
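/* Create or update a PDP context.  A context may be found either by MS
 * address or by TID/TEI; NLM_F_EXCL refuses to touch an existing one
 * and NLM_F_REPLACE is not supported.
 */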
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
unsigned int version;
bool found = false;
__be32 ms_addr;
ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
pctx = ipv4_pdp_find(gtp, ms_addr);
if (pctx)
found = true;
if (version == GTP_V0)
pctx_tid = gtp0_pdp_find(gtp,
nla_get_u64(info->attrs[GTPA_TID]));
else if (version == GTP_V1)
pctx_tid = gtp1_pdp_find(gtp,
nla_get_u32(info->attrs[GTPA_I_TEI]));
if (pctx_tid)
found = true;
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
return ERR_PTR(-EEXIST);
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
return ERR_PTR(-EOPNOTSUPP);
if (pctx && pctx_tid)
return ERR_PTR(-EEXIST);
if (!pctx)
pctx = pctx_tid;
ipv4_pdp_fill(pctx, info);
if (pctx->gtp_version == GTP_V0)
netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
pctx->u.v0.tid, pctx);
else if (pctx->gtp_version == GTP_V1)
netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
return pctx;
}
pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
return ERR_PTR(-ENOMEM);
sock_hold(sk);
pctx->sk = sk;
pctx->dev = gtp->dev;
ipv4_pdp_fill(pctx, info);
atomic_set(&pctx->tx_seq, 0);
switch (pctx->gtp_version) {
case GTP_V0:
/* TS 09.60: "The flow label identifies unambiguously a GTP
* flow.". We use the tid for this instead, I cannot find a
* situation in which this doesn't unambiguosly identify the
* PDP context.
*/
hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
break;
case GTP_V1:
hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
break;
}
hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
switch (pctx->gtp_version) {
case GTP_V0:
netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
pctx->u.v0.tid, &pctx->peer_addr_ip4,
&pctx->ms_addr_ip4, pctx);
break;
case GTP_V1:
netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei,
&pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
break;
}
return pctx;
}
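/* PDP contexts are unlinked from both hash tables immediately but only
 * freed after an RCU grace period, when the socket reference taken in
 * gtp_pdp_add() is dropped as well.
 */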
static void pdp_context_free(struct rcu_head *head)
{
struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
sock_put(pctx->sk);
kfree(pctx);
}
static void pdp_context_delete(struct pdp_ctx *pctx)
{
hlist_del_rcu(&pctx->hlist_tid);
hlist_del_rcu(&pctx->hlist_addr);
call_rcu(&pctx->rcu_head, pdp_context_free);
}
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
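/* GTP_CMD_NEWPDP: validate the version-specific attributes, look up the
 * device and its matching encapsulation socket under the rtnl lock, then
 * install the context and notify the multicast group.
 */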
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
unsigned int version;
struct pdp_ctx *pctx;
struct gtp_dev *gtp;
struct sock *sk;
int err;
if (!info->attrs[GTPA_VERSION] ||
!info->attrs[GTPA_LINK] ||
!info->attrs[GTPA_PEER_ADDRESS] ||
!info->attrs[GTPA_MS_ADDRESS])
return -EINVAL;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
switch (version) {
case GTP_V0:
if (!info->attrs[GTPA_TID] ||
!info->attrs[GTPA_FLOW])
return -EINVAL;
break;
case GTP_V1:
if (!info->attrs[GTPA_I_TEI] ||
!info->attrs[GTPA_O_TEI])
return -EINVAL;
break;
default:
return -EINVAL;
}
rtnl_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
if (!gtp) {
err = -ENODEV;
goto out_unlock;
}
if (version == GTP_V0)
sk = gtp->sk0;
else if (version == GTP_V1)
sk = gtp->sk1u;
else
sk = NULL;
if (!sk) {
err = -ENODEV;
goto out_unlock;
}
pctx = gtp_pdp_add(gtp, sk, info);
if (IS_ERR(pctx)) {
err = PTR_ERR(pctx);
} else {
gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
err = 0;
}
out_unlock:
rtnl_unlock();
return err;
}
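/* Look up a PDP context on the device named by GTPA_LINK, preferring a
 * match on the mobile subscriber address, otherwise by TID (GTPv0) or
 * input TEI (GTPv1).
 */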
static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
struct nlattr *nla[])
{
struct gtp_dev *gtp;
gtp = gtp_find_dev(net, nla);
if (!gtp)
return ERR_PTR(-ENODEV);
if (nla[GTPA_MS_ADDRESS]) {
__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
return ipv4_pdp_find(gtp, ip);
} else if (nla[GTPA_VERSION]) {
u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
if (gtp_version == GTP_V0 && nla[GTPA_TID])
return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
}
return ERR_PTR(-EINVAL);
}
static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
struct pdp_ctx *pctx;
if (nla[GTPA_LINK])
pctx = gtp_find_pdp_by_link(net, nla);
else
pctx = ERR_PTR(-EINVAL);
if (!pctx)
pctx = ERR_PTR(-ENOENT);
return pctx;
}
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx;
int err = 0;
if (!info->attrs[GTPA_VERSION])
return -EINVAL;
rcu_read_lock();
pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
if (IS_ERR(pctx)) {
err = PTR_ERR(pctx);
goto out_unlock;
}
if (pctx->gtp_version == GTP_V0)
netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
pctx->u.v0.tid, pctx);
else if (pctx->gtp_version == GTP_V1)
netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
pdp_context_delete(pctx);
out_unlock:
rcu_read_unlock();
return err;
}
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
void *genlh;
genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
type);
if (genlh == NULL)
goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
goto nla_put_failure;
switch (pctx->gtp_version) {
case GTP_V0:
if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
goto nla_put_failure;
break;
case GTP_V1:
if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
goto nla_put_failure;
break;
}
genlmsg_end(skb, genlh);
return 0;
nlmsg_failure:
nla_put_failure:
genlmsg_cancel(skb, genlh);
return -EMSGSIZE;
}
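/* Broadcast a PDP create/delete event to the GTP generic netlink
 * multicast group.
 */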
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
if (!msg)
return -ENOMEM;
ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
0, GTP_GENL_MCGRP, GFP_ATOMIC);
return ret;
}
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx = NULL;
struct sk_buff *skb2;
int err;
if (!info->attrs[GTPA_VERSION])
return -EINVAL;
rcu_read_lock();
pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
if (IS_ERR(pctx)) {
err = PTR_ERR(pctx);
goto err_unlock;
}
skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb2 == NULL) {
err = -ENOMEM;
goto err_unlock;
}
err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
0, info->nlhdr->nlmsg_type, pctx);
if (err < 0)
goto err_unlock_free;
rcu_read_unlock();
return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
err_unlock_free:
kfree_skb(skb2);
err_unlock:
rcu_read_unlock();
return err;
}
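/* Dump all PDP contexts.  cb->args[] holds the resume state: args[0] is
 * the hash bucket, args[1] the number of entries already dumped from it,
 * args[2] the last device visited and args[4] marks a finished dump.
 */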
static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
struct pdp_ctx *pctx;
struct gtp_net *gn;
gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
if (last_gtp && last_gtp != gtp)
continue;
else
last_gtp = NULL;
for (i = bucket; i < gtp->hash_size; i++) {
j = 0;
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
hlist_tid) {
if (j >= skip &&
gtp_genl_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
cb->args[1] = j;
cb->args[2] = (unsigned long)gtp;
goto out;
}
j++;
}
skip = 0;
}
bucket = 0;
}
cb->args[4] = 1;
out:
rcu_read_unlock();
return skb->len;
}
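/* Build and transmit a GTPv0/GTPv1-U Echo Request to the peer address.
 * Only supported when the sockets were created by the kernel itself
 * (gtp->sk_created).
 */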
static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *skb_to_send;
__be32 src_ip, dst_ip;
unsigned int version;
struct gtp_dev *gtp;
struct flowi4 fl4;
struct rtable *rt;
struct sock *sk;
__be16 port;
int len;
if (!info->attrs[GTPA_VERSION] ||
!info->attrs[GTPA_LINK] ||
!info->attrs[GTPA_PEER_ADDRESS] ||
!info->attrs[GTPA_MS_ADDRESS])
return -EINVAL;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
if (!gtp)
return -ENODEV;
if (!gtp->sk_created)
return -EOPNOTSUPP;
if (!(gtp->dev->flags & IFF_UP))
return -ENETDOWN;
if (version == GTP_V0) {
struct gtp0_header *gtp0_h;
len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
sizeof(struct iphdr) + sizeof(struct udphdr);
skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
if (!skb_to_send)
return -ENOMEM;
sk = gtp->sk0;
port = htons(GTP0_PORT);
gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
memset(gtp0_h, 0, sizeof(struct gtp0_header));
gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
} else if (version == GTP_V1) {
struct gtp1_header_long *gtp1u_h;
len = LL_RESERVED_SPACE(gtp->dev) +
sizeof(struct gtp1_header_long) +
sizeof(struct iphdr) + sizeof(struct udphdr);
skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
if (!skb_to_send)
return -ENOMEM;
sk = gtp->sk1u;
port = htons(GTP1U_PORT);
gtp1u_h = skb_push(skb_to_send,
sizeof(struct gtp1_header_long));
memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
} else {
return -ENODEV;
}
rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
if (IS_ERR(rt)) {
netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
&dst_ip);
kfree_skb(skb_to_send);
return -ENODEV;
}
udp_tunnel_xmit_skb(rt, sk, skb_to_send,
fl4.saddr, fl4.daddr,
fl4.flowi4_tos,
ip4_dst_hoplimit(&rt->dst),
0,
port, port,
!net_eq(sock_net(sk),
dev_net(gtp->dev)),
false);
return 0;
}
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
[GTPA_TID] = { .type = NLA_U64, },
[GTPA_PEER_ADDRESS] = { .type = NLA_U32, },
[GTPA_MS_ADDRESS] = { .type = NLA_U32, },
[GTPA_FLOW] = { .type = NLA_U16, },
[GTPA_NET_NS_FD] = { .type = NLA_U32, },
[GTPA_I_TEI] = { .type = NLA_U32, },
[GTPA_O_TEI] = { .type = NLA_U32, },
};
static const struct genl_small_ops gtp_genl_ops[] = {
{
.cmd = GTP_CMD_NEWPDP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_new_pdp,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_DELPDP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_del_pdp,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_GETPDP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_get_pdp,
.dumpit = gtp_genl_dump_pdp,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_ECHOREQ,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_send_echo_req,
.flags = GENL_ADMIN_PERM,
},
};
static struct genl_family gtp_genl_family __ro_after_init = {
.name = "gtp",
.version = 0,
.hdrsize = 0,
.maxattr = GTPA_MAX,
.policy = gtp_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.small_ops = gtp_genl_ops,
.n_small_ops = ARRAY_SIZE(gtp_genl_ops),
.mcgrps = gtp_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
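/* Each network namespace keeps its own list of GTP devices; they are all
 * torn down when the namespace exits.
 */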
static int __net_init gtp_net_init(struct net *net)
{
struct gtp_net *gn = net_generic(net, gtp_net_id);
INIT_LIST_HEAD(&gn->gtp_dev_list);
return 0;
}
static void __net_exit gtp_net_exit(struct net *net)
{
struct gtp_net *gn = net_generic(net, gtp_net_id);
struct gtp_dev *gtp;
LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(gtp, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
.exit = gtp_net_exit,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
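/* Module init: register the rtnl link type, the generic netlink family
 * and the pernet operations, unwinding in reverse order on failure.
 */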
static int __init gtp_init(void)
{
int err;
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
goto error_out;
err = genl_register_family(&gtp_genl_family);
if (err < 0)
goto unreg_rtnl_link;
err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto unreg_genl_family;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
unreg_genl_family:
genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
}
late_initcall(gtp_init);
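/* Unregister in the reverse order of gtp_init(); the pernet operations
 * must go last so that gtp_newlink() cannot run against an already
 * destroyed gtp_net.
 */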
static void __exit gtp_fini(void)
{
genl_unregister_family(&gtp_genl_family);
rtnl_link_unregister(&gtp_link_ops);
unregister_pernet_subsys(&gtp_net_ops);
pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");