2019-05-27 14:55:01 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2011-12-09 14:23:51 +08:00
|
|
|
/*
|
|
|
|
* udp_diag.c Module for monitoring UDP transport protocols sockets.
|
|
|
|
*
|
|
|
|
* Authors: Pavel Emelyanov, <xemul@parallels.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/inet_diag.h>
|
|
|
|
#include <linux/udp.h>
|
|
|
|
#include <net/udp.h>
|
|
|
|
#include <net/udplite.h>
|
|
|
|
#include <linux/sock_diag.h>
|
|
|
|
|
2011-12-09 14:24:21 +08:00
|
|
|
/*
 * Emit one netlink record for @sk into the dump skb.
 *
 * The socket is first matched against the user-supplied bytecode
 * filter @bc; sockets the filter rejects are silently skipped
 * (returning 0 keeps the dump going).
 *
 * Returns 0 on skip/success, or a negative error from
 * inet_sk_diag_fill() (e.g. -EMSGSIZE when the skb is full).
 */
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *req,
			struct nlattr *bc, bool net_admin)
{
	if (inet_diag_bc_sk(bc, sk))
		return inet_sk_diag_fill(sk, NULL, skb, cb, req,
					 NLM_F_MULTI, net_admin);

	/* Filtered out: not an error, just nothing to report. */
	return 0;
}
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
static int udp_dump_one(struct udp_table *tbl,
|
|
|
|
struct netlink_callback *cb,
|
2015-03-10 22:15:54 +08:00
|
|
|
const struct inet_diag_req_v2 *req)
|
2011-12-09 14:23:51 +08:00
|
|
|
{
|
2024-06-11 20:26:44 +08:00
|
|
|
struct sk_buff *in_skb = cb->skb;
|
2011-12-09 14:24:06 +08:00
|
|
|
int err = -EINVAL;
|
2016-04-01 23:52:13 +08:00
|
|
|
struct sock *sk = NULL;
|
2011-12-09 14:24:06 +08:00
|
|
|
struct sk_buff *rep;
|
2012-07-16 12:28:49 +08:00
|
|
|
struct net *net = sock_net(in_skb->sk);
|
2011-12-09 14:24:06 +08:00
|
|
|
|
2016-04-01 23:52:13 +08:00
|
|
|
rcu_read_lock();
|
2011-12-09 14:24:06 +08:00
|
|
|
if (req->sdiag_family == AF_INET)
|
2018-10-29 08:15:22 +08:00
|
|
|
/* src and dst are swapped for historical reasons */
|
2012-07-16 12:28:49 +08:00
|
|
|
sk = __udp4_lib_lookup(net,
|
2011-12-09 14:24:06 +08:00
|
|
|
req->id.idiag_src[0], req->id.idiag_sport,
|
|
|
|
req->id.idiag_dst[0], req->id.idiag_dport,
|
2017-08-07 23:44:16 +08:00
|
|
|
req->id.idiag_if, 0, tbl, NULL);
|
2011-12-10 07:35:07 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2011-12-09 14:24:06 +08:00
|
|
|
else if (req->sdiag_family == AF_INET6)
|
2012-07-16 12:28:49 +08:00
|
|
|
sk = __udp6_lib_lookup(net,
|
2011-12-09 14:24:06 +08:00
|
|
|
(struct in6_addr *)req->id.idiag_src,
|
|
|
|
req->id.idiag_sport,
|
|
|
|
(struct in6_addr *)req->id.idiag_dst,
|
|
|
|
req->id.idiag_dport,
|
2017-08-07 23:44:20 +08:00
|
|
|
req->id.idiag_if, 0, tbl, NULL);
|
2011-12-10 07:35:07 +08:00
|
|
|
#endif
|
2017-06-30 18:08:01 +08:00
|
|
|
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
|
2016-04-01 23:52:13 +08:00
|
|
|
sk = NULL;
|
|
|
|
rcu_read_unlock();
|
2011-12-09 14:24:06 +08:00
|
|
|
err = -ENOENT;
|
2015-04-03 16:17:26 +08:00
|
|
|
if (!sk)
|
2011-12-09 14:24:06 +08:00
|
|
|
goto out_nosk;
|
|
|
|
|
2011-12-15 10:43:44 +08:00
|
|
|
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
|
2011-12-09 14:24:06 +08:00
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
err = -ENOMEM;
|
2024-06-11 20:08:33 +08:00
|
|
|
rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
|
|
|
|
inet_diag_msg_attrs_size() +
|
|
|
|
nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
|
2013-03-27 14:47:04 +08:00
|
|
|
GFP_KERNEL);
|
2011-12-09 14:24:06 +08:00
|
|
|
if (!rep)
|
|
|
|
goto out;
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0,
|
|
|
|
netlink_net_capable(in_skb, CAP_NET_ADMIN));
|
2011-12-09 14:24:06 +08:00
|
|
|
if (err < 0) {
|
|
|
|
WARN_ON(err == -EMSGSIZE);
|
|
|
|
kfree_skb(rep);
|
|
|
|
goto out;
|
|
|
|
}
|
2012-09-08 04:12:54 +08:00
|
|
|
err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
|
2011-12-09 14:24:06 +08:00
|
|
|
MSG_DONTWAIT);
|
|
|
|
if (err > 0)
|
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
if (sk)
|
|
|
|
sock_put(sk);
|
|
|
|
out_nosk:
|
|
|
|
return err;
|
2011-12-09 14:23:51 +08:00
|
|
|
}
|
|
|
|
|
2015-03-10 22:15:54 +08:00
|
|
|
/*
 * Walk every hash slot of @table and dump all sockets matching @r into
 * @skb.  The dump is resumable: cb->args[0] holds the slot and
 * cb->args[1] the in-slot position to continue from when the skb fills
 * up and userspace issues the next recvmsg().
 */
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		struct netlink_callback *cb,
		const struct inet_diag_req_v2 *r)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	struct inet_diag_dump_data *cb_data;
	int num, s_num, slot, s_slot;
	struct nlattr *bc;

	cb_data = cb->data;
	bc = cb_data->inet_diag_nla_bc;	/* optional bytecode filter */
	/* Resume point saved by a previous partial dump (0,0 initially). */
	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		/* Pin the chain while we iterate and fill the skb. */
		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			/* Skip entries already emitted in a prior pass;
			 * NB: sockets not in this net don't advance num,
			 * matching how the resume position was counted.
			 */
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next;
			/* Port filters: zero means "any". */
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			/* skb full (or fill error): stop here and record
			 * the resume position below.
			 */
			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}
|
|
|
|
|
|
|
|
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
2024-06-11 20:26:44 +08:00
|
|
|
const struct inet_diag_req_v2 *r)
|
2011-12-09 14:23:51 +08:00
|
|
|
{
|
2024-06-11 20:26:44 +08:00
|
|
|
udp_dump(&udp_table, skb, cb, r);
|
2011-12-09 14:23:51 +08:00
|
|
|
}
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
static int udp_diag_dump_one(struct netlink_callback *cb,
|
2015-03-10 22:15:54 +08:00
|
|
|
const struct inet_diag_req_v2 *req)
|
2011-12-09 14:23:51 +08:00
|
|
|
{
|
2024-06-11 20:26:44 +08:00
|
|
|
return udp_dump_one(&udp_table, cb, req);
|
2011-12-09 14:23:51 +08:00
|
|
|
}
|
|
|
|
|
2012-04-25 02:15:41 +08:00
|
|
|
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
|
|
|
|
void *info)
|
|
|
|
{
|
2018-06-08 17:35:40 +08:00
|
|
|
r->idiag_rqueue = udp_rqueue_get(sk);
|
2012-04-25 02:15:41 +08:00
|
|
|
r->idiag_wqueue = sk_wmem_alloc_get(sk);
|
|
|
|
}
|
|
|
|
|
2016-08-24 12:06:33 +08:00
|
|
|
#ifdef CONFIG_INET_DIAG_DESTROY
|
|
|
|
/*
 * Common SOCK_DESTROY handler for UDP and UDP-lite: locate the socket
 * described by @req in @tbl and abort it with ECONNABORTED.
 *
 * Note the address order here is the reverse of udp_dump_one(): the
 * request's dst/dport are passed as the lookup's saddr/sport.
 *
 * Returns 0 on success, -EINVAL for an unsupported family, -ENOENT if
 * the socket is gone or the cookie does not match, or the error from
 * sock_diag_destroy().
 */
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	/* Hash-chain walk is RCU-protected; hold the read lock until we
	 * own a reference on the socket.
	 */
	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		/* v4-mapped v6 addresses live in the IPv4 hash table;
		 * word [3] of the in6_addr holds the embedded v4 address.
		 */
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[3], req->id.idiag_dport,
					req->id.idiag_src[3], req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Only keep the socket if it is not already being torn down. */
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	/* Guard against destroying a recycled socket slot. */
	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}
|
|
|
|
|
|
|
|
static int udp_diag_destroy(struct sk_buff *in_skb,
|
|
|
|
const struct inet_diag_req_v2 *req)
|
|
|
|
{
|
|
|
|
return __udp_diag_destroy(in_skb, req, &udp_table);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int udplite_diag_destroy(struct sk_buff *in_skb,
|
|
|
|
const struct inet_diag_req_v2 *req)
|
|
|
|
{
|
|
|
|
return __udp_diag_destroy(in_skb, req, &udplite_table);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2011-12-09 14:23:51 +08:00
|
|
|
/* inet_diag handler registration for plain UDP (IPPROTO_UDP).
 * idiag_info_size is 0: UDP provides no INET_DIAG_INFO attribute.
 */
static const struct inet_diag_handler udp_diag_handler = {
	.dump		 = udp_diag_dump,
	.dump_one	 = udp_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDP,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udp_diag_destroy,
#endif
};
|
|
|
|
|
|
|
|
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
2024-06-11 20:26:44 +08:00
|
|
|
const struct inet_diag_req_v2 *r)
|
2011-12-09 14:23:51 +08:00
|
|
|
{
|
2024-06-11 20:26:44 +08:00
|
|
|
udp_dump(&udplite_table, skb, cb, r);
|
2011-12-09 14:23:51 +08:00
|
|
|
}
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
static int udplite_diag_dump_one(struct netlink_callback *cb,
|
2015-03-10 22:15:54 +08:00
|
|
|
const struct inet_diag_req_v2 *req)
|
2011-12-09 14:23:51 +08:00
|
|
|
{
|
2024-06-11 20:26:44 +08:00
|
|
|
return udp_dump_one(&udplite_table, cb, req);
|
2011-12-09 14:23:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* inet_diag handler registration for UDP-lite (IPPROTO_UDPLITE);
 * shares udp_diag_get_info with plain UDP since the queue accounting
 * is identical.
 */
static const struct inet_diag_handler udplite_diag_handler = {
	.dump		 = udplite_diag_dump,
	.dump_one	 = udplite_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDPLITE,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udplite_diag_destroy,
#endif
};
|
|
|
|
|
|
|
|
/*
 * Module init: register diag handlers for both UDP and UDP-lite.
 * If the second registration fails, the first is rolled back so the
 * module loads all-or-nothing.
 */
static int __init udp_diag_init(void)
{
	int err;

	err = inet_diag_register(&udp_diag_handler);
	if (err)
		return err;

	err = inet_diag_register(&udplite_diag_handler);
	if (err)
		inet_diag_unregister(&udp_diag_handler);

	return err;
}
|
|
|
|
|
|
|
|
/* Module exit: unregister the handlers in reverse registration order. */
static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}
|
|
|
|
|
|
|
|
module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload on NETLINK_SOCK_DIAG requests for these family/protocol pairs. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
|