#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H


#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/scm.h>
#include <uapi/linux/netlink.h>

struct net;

static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
	return (struct nlmsghdr *)skb->data;
}

enum netlink_skb_flags {
	NETLINK_SKB_MMAPED	= 0x1,	/* Packet data is mmaped */
	NETLINK_SKB_TX		= 0x2,	/* Packet was sent by userspace */
	NETLINK_SKB_DELIVERED	= 0x4,	/* Packet was delivered */
	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
};

struct netlink_skb_parms {
	struct scm_creds	creds;		/* Skb credentials	*/
	__u32			portid;
	__u32			dst_group;
	__u32			flags;
	struct sock		*sk;
};

#define NETLINK_CB(skb)		(*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
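
/*
 * Illustrative sketch (not part of this header): inside a kernel-side
 * ->input() handler, per-packet metadata is read through NETLINK_CB().
 * The handler name my_nl_input() is hypothetical.
 *
 *	static void my_nl_input(struct sk_buff *skb)
 *	{
 *		u32 sender = NETLINK_CB(skb).portid;	// requester's port id
 *		u32 group  = NETLINK_CB(skb).dst_group;	// 0 for unicast
 *		...
 *	}
 */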

extern void netlink_table_grab(void);
extern void netlink_table_ungrab(void);

#define NL_CFG_F_NONROOT_RECV	(1 << 0)
#define NL_CFG_F_NONROOT_SEND	(1 << 1)

/* optional Netlink kernel configuration parameters */
struct netlink_kernel_cfg {
	unsigned int	groups;
	unsigned int	flags;
	void		(*input)(struct sk_buff *skb);
	struct mutex	*cb_mutex;
	int		(*bind)(int group);
	void		(*unbind)(int group);
	bool		(*compare)(struct net *net, struct sock *sk);
};

extern struct sock *__netlink_kernel_create(struct net *net, int unit,
					    struct module *module,
					    struct netlink_kernel_cfg *cfg);

static inline struct sock *
netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
{
	return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
}
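
/*
 * Illustrative sketch, not a complete example: creating a kernel-side socket
 * for a protocol.  NETLINK_TEST and test_nl_rcv() are hypothetical names;
 * real users pass one of the NETLINK_* protocol numbers from
 * uapi/linux/netlink.h.
 *
 *	static void test_nl_rcv(struct sk_buff *skb)
 *	{
 *		struct nlmsghdr *nlh = nlmsg_hdr(skb);
 *		// process nlh; NETLINK_CB(skb).portid identifies the sender
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 1,
 *		.input	= test_nl_rcv,
 *	};
 *	struct sock *nlsk = netlink_kernel_create(&init_net, NETLINK_TEST, &cfg);
 *	...
 *	netlink_kernel_release(nlsk);
 */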

extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
					 u32 dst_portid, gfp_t gfp_mask);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
			     __u32 group, gfp_t allocation);
extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
	__u32 portid, __u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data);
extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
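
/*
 * Illustrative sketch: building a message with the nlmsg_*() helpers from
 * <net/netlink.h> and pushing it out.  MY_MSG_TYPE, payload and dst_portid
 * are hypothetical; error handling is abbreviated.
 *
 *	struct sk_buff *skb = nlmsg_new(payload_len, GFP_KERNEL);
 *	struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, payload_len, 0);
 *	memcpy(nlmsg_data(nlh), payload, payload_len);
 *
 *	netlink_unicast(nlsk, skb, dst_portid, MSG_DONTWAIT);
 *	// or: netlink_broadcast(nlsk, skb, 0, group, GFP_KERNEL);
 */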

extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);

/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);

static inline struct sk_buff *
netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *nskb;

	nskb = skb_clone(skb, gfp_mask);
	if (!nskb)
		return NULL;

	/* This is a large skb, set destructor callback to release head */
	if (is_vmalloc_addr(skb->head))
		nskb->destructor = skb->destructor;

	return nskb;
}

/*
 *	skb should fit one page. This choice is good for headerless malloc.
 *	But we should limit to 8K so that userspace does not have to
 *	use enormous buffer sizes on recvmsg() calls just to avoid
 *	MSG_TRUNC when PAGE_SIZE is very large.
 */
#if PAGE_SIZE < 8192UL
#define NLMSG_GOODSIZE	SKB_WITH_OVERHEAD(PAGE_SIZE)
#else
#define NLMSG_GOODSIZE	SKB_WITH_OVERHEAD(8192UL)
#endif

#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
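
/*
 * A common allocation pattern (sketch): size a reply or dump buffer with the
 * default payload size, using the nlmsg_new() helper from <net/netlink.h>.
 *
 *	struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 */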

struct netlink_callback {
	struct sk_buff		*skb;
	const struct nlmsghdr	*nlh;
	int			(*dump)(struct sk_buff *skb,
					struct netlink_callback *cb);
	int			(*done)(struct netlink_callback *cb);
	void			*data;
	/* the module that the dump function belongs to */
	struct module		*module;
	u16			family;
	u16			min_dump_alloc;
	/*
	 * Dump consistency checking (NLM_F_DUMP_INTR): the dumpit callback
	 * should set cb->seq and call nl_dump_check_consistent() for each
	 * new message; the core repeats the check for the final MSG_DONE
	 * so an interrupted dump is reported to userspace.
	 */
	unsigned int		prev_seq, seq;
	long			args[6];
};
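
/*
 * Illustrative dump callback sketch: cb->args[] holds the iteration cursor
 * between invocations, and a non-zero return length asks the core to call
 * the dump again with a fresh skb.  my_dump(), my_table_size and MY_MSG_TYPE
 * are hypothetical.
 *
 *	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		long idx;
 *
 *		for (idx = cb->args[0]; idx < my_table_size; idx++) {
 *			if (!nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 *				       cb->nlh->nlmsg_seq, MY_MSG_TYPE,
 *				       0, NLM_F_MULTI))
 *				break;		// skb full, resume here later
 *		}
 *		cb->args[0] = idx;
 *		return skb->len;	// 0 terminates the dump
 *	}
 */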

struct netlink_notify {
	struct net *net;
	int portid;
	int protocol;
};

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);

struct netlink_dump_control {
	int (*dump)(struct sk_buff *skb, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	void *data;
	struct module *module;
	u16 min_dump_alloc;
};

extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				     const struct nlmsghdr *nlh,
				     struct netlink_dump_control *control)
{
	if (!control->module)
		control->module = THIS_MODULE;

	return __netlink_dump_start(ssk, skb, nlh, control);
}
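
/*
 * Illustrative sketch: dispatching a request that has NLM_F_DUMP set to the
 * dump machinery from a message handler.  my_dump() and my_done() are
 * hypothetical.
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 */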

struct netlink_tap {
	struct net_device *dev;
	struct module *module;
	struct list_head list;
};

extern int netlink_add_tap(struct netlink_tap *nt);
extern int netlink_remove_tap(struct netlink_tap *nt);
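
/*
 * Taps deliver clones of netlink packets to a packet-capture net_device
 * (the nlmon driver is the in-tree user).  A minimal registration sketch,
 * assuming a net_device 'dev' already set up by the caller:
 *
 *	static struct netlink_tap my_tap = {
 *		.module = THIS_MODULE,
 *	};
 *
 *	my_tap.dev = dev;
 *	netlink_add_tap(&my_tap);
 *	...
 *	netlink_remove_tap(&my_tap);
 */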

bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *ns, int cap);
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
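
/*
 * A typical permission check in a message handler (sketch): require that the
 * sender had CAP_NET_ADMIN in the user namespace owning the socket's network
 * namespace.
 *
 *	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *		return -EPERM;
 */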

#endif	/* __LINUX_NETLINK_H */