Merge branch 'master' of git://blackhole.kfki.hu/nf-next
Jozsef Kadlecsik says:

====================
ipset patches for nf-next

Please consider applying the next bunch of patches for ipset. First come the small changes, then the bug fixes, and finally the RCU-related patches.

* Use MSEC_PER_SEC consistently instead of the raw number.
* Use the SET_WITH_*() helpers to test set extensions, from Sergey Popovich.
* Check extension attributes before getting the extensions, from Sergey Popovich.
* Permit a CIDR equal to the host address CIDR in IPv6, from Sergey Popovich.
* Make sure we always return the line number on batch in the case of error, from Sergey Popovich.
* Check the CIDR value only when the attribute is given, from Sergey Popovich.
* Fix cidr handling for hash:*net* types, reported by Jonathan Johnson.
* Fix parallel resizing and listing of the same set, so that the original set is kept for the whole dumping.
* Make sure listing doesn't grab a set which is just being destroyed.
* Remove the rbtree from ip_set_hash_netiface.c in order to introduce RCU.
* Replace rwlock_t with spinlock_t in "struct ip_set", change the locking in the core, and simplify the timeout routines.
* Introduce RCU locking in the bitmap:* types, with a slight modification of the logic for how an element is added.
* Introduce RCU locking in the hash:* types. This is the most complex part of the changes.
* Introduce RCU locking in the list type, where the standard rculist is used.
* Fix coding styles reported by checkpatch.pl.
====================

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
commit 53b8762727
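For context on the RCU conversion listed above, the following is a minimal, illustrative sketch (not the exact patch) of the read-mostly replacement pattern the series adopts for the comment extension: the string is wrapped in a structure carrying an rcu_head, readers dereference it under rcu_read_lock_bh(), and writers swap the pointer under the per-set spinlock and free the old copy with kfree_rcu(). The helper names comment_replace() and comment_peek() below are hypothetical; only the struct names follow the patch.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: simplified version of the RCU-protected comment extension. */
struct ip_set_comment_rcu {
	struct rcu_head rcu;
	char str[0];		/* flexible array holding the comment text */
};

struct ip_set_comment {
	struct ip_set_comment_rcu __rcu *c;
};

/* Writer side: assumed to be called with the per-set spinlock held. */
static void comment_replace(struct ip_set_comment *comment, const char *text)
{
	struct ip_set_comment_rcu *old, *new;
	size_t len = strlen(text);

	old = rcu_dereference_protected(comment->c, 1);
	new = kzalloc(sizeof(*new) + len + 1, GFP_ATOMIC);
	if (!new)
		return;
	strlcpy(new->str, text, len + 1);
	rcu_assign_pointer(comment->c, new);
	if (old)
		kfree_rcu(old, rcu);	/* readers may still hold the old copy */
}

/* Reader side: runs under rcu_read_lock_bh(), e.g. while dumping a set. */
static const char *comment_peek(struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);

	return c ? c->str : NULL;
}

The same split — spin_lock_bh() on the update path, rcu_read_lock_bh() on the test/list path — is what allows the series to drop the per-set rwlock_t in favour of spinlock_t in the diff below.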
@@ -108,8 +108,13 @@ struct ip_set_counter {
atomic64_t packets;
};

struct ip_set_comment_rcu {
struct rcu_head rcu;
char str[0];
};

struct ip_set_comment {
char *str;
struct ip_set_comment_rcu __rcu *c;
};

struct ip_set_skbinfo {

@@ -176,6 +181,9 @@ struct ip_set_type_variant {
/* List elements */
int (*list)(const struct ip_set *set, struct sk_buff *skb,
struct netlink_callback *cb);
/* Keep listing private when resizing runs parallel */
void (*uref)(struct ip_set *set, struct netlink_callback *cb,
bool start);

/* Return true if "b" set is the same as "a"
* according to the create set parameters */

@@ -223,7 +231,7 @@ struct ip_set {
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
rwlock_t lock;
spinlock_t lock;
/* References to the set */
u32 ref;
/* The core set type */

@@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
cpu_to_be64((u64)skbinfo->skbmark << 32 |
skbinfo->skbmarkmask))) ||
(skbinfo->skbprio &&
nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
(skbinfo->skbqueue &&
nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
cpu_to_be16(skbinfo->skbqueue)));

}

static inline void

@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,

/* Netlink CB args */
enum {
IPSET_CB_NET = 0,
IPSET_CB_DUMP,
IPSET_CB_INDEX,
IPSET_CB_ARG0,
IPSET_CB_NET = 0, /* net namespace */
IPSET_CB_DUMP, /* dump single set/all sets */
IPSET_CB_INDEX, /* set index */
IPSET_CB_PRIVATE, /* set private data */
IPSET_CB_ARG0, /* type specific */
IPSET_CB_ARG1,
IPSET_CB_ARG2,
};

/* register and unregister set references */

@@ -545,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
.timeout = (set)->timeout }

#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))

#define IPSET_CONCAT(a, b) a##b
#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)

@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
return nla_data(tb);
}

/* Called from uadd only, protected by the set spinlock.
* The kadt functions don't use the comment extensions in any way.
*/
static inline void
ip_set_init_comment(struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;

if (unlikely(comment->str)) {
kfree(comment->str);
comment->str = NULL;
if (unlikely(c)) {
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
if (!len)
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
comment->str = kzalloc(len + 1, GFP_ATOMIC);
if (unlikely(!comment->str))
c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
if (unlikely(!c))
return;
strlcpy(comment->str, ext->comment, len + 1);
strlcpy(c->str, ext->comment, len + 1);
rcu_assign_pointer(comment->c, c);
}

/* Used only when dumping a set, protected by rcu_read_lock_bh() */
static inline int
ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
{
if (!comment->str)
struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);

if (!c)
return 0;
return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}

/* Called from uadd/udel, flush or the garbage collectors protected
* by the set spinlock.
* Called when the set is destroyed and when there can't be any user
* of the set data anymore.
*/
static inline void
ip_set_comment_free(struct ip_set_comment *comment)
{
if (unlikely(!comment->str))
struct ip_set_comment_rcu *c;

c = rcu_dereference_protected(comment->c, 1);
if (unlikely(!c))
return;
kfree(comment->str);
comment->str = NULL;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}

#endif

@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
}

static inline bool
ip_set_timeout_test(unsigned long timeout)
ip_set_timeout_expired(unsigned long *t)
{
return timeout == IPSET_ELEM_PERMANENT ||
time_is_after_jiffies(timeout);
}

static inline bool
ip_set_timeout_expired(unsigned long *timeout)
{
return *timeout != IPSET_ELEM_PERMANENT &&
time_is_before_jiffies(*timeout);
return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}

static inline void
ip_set_timeout_set(unsigned long *timeout, u32 t)
ip_set_timeout_set(unsigned long *timeout, u32 value)
{
if (!t) {
unsigned long t;

if (!value) {
*timeout = IPSET_ELEM_PERMANENT;
return;
}

*timeout = msecs_to_jiffies(t * 1000) + jiffies;
if (*timeout == IPSET_ELEM_PERMANENT)
t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
if (t == IPSET_ELEM_PERMANENT)
/* Bingo! :-) */
(*timeout)--;
t--;
*timeout = t;
}

static inline u32
ip_set_timeout_get(unsigned long *timeout)
{
return *timeout == IPSET_ELEM_PERMANENT ? 0 :
jiffies_to_msecs(*timeout - jiffies)/1000;
jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
}

#endif /* __KERNEL__ */

@@ -15,12 +15,12 @@
/* The protocol version */
#define IPSET_PROTOCOL 6

/* The maximum permissible comment length we will accept over netlink */
#define IPSET_MAX_COMMENT_SIZE 255

/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32

/* The maximum permissible comment length we will accept over netlink */
#define IPSET_MAX_COMMENT_SIZE 255

/* Message types and commands */
enum ipset_cmd {
IPSET_CMD_NONE,

@@ -41,7 +41,7 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
struct mtype *map = set->data;

init_timer(&map->gc);
map->gc.data = (unsigned long) set;
map->gc.data = (unsigned long)set;
map->gc.function = gc;
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);

@@ -144,10 +144,12 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,

if (ret == IPSET_ADD_FAILED) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(x, set)))
ip_set_timeout_expired(ext_timeout(x, set))) {
ret = 0;
else if (!(flags & IPSET_FLAG_EXIST))
} else if (!(flags & IPSET_FLAG_EXIST)) {
set_bit(e->id, map->members);
return -IPSET_ERR_EXIST;
}
/* Element is re-added, cleanup extensions */
ip_set_ext_destroy(set, x);
}

@@ -165,6 +167,10 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
ip_set_init_comment(ext_comment(x, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(x, set), ext);

/* Activate element */
set_bit(e->id, map->members);

return 0;
}

@@ -203,10 +209,13 @@ mtype_list(const struct ip_set *set,
struct nlattr *adt, *nested;
void *x;
u32 id, first = cb->args[IPSET_CB_ARG0];
int ret = 0;

adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt)
return -EMSGSIZE;
/* Extensions may be replaced */
rcu_read_lock();
for (; cb->args[IPSET_CB_ARG0] < map->elements;
cb->args[IPSET_CB_ARG0]++) {
id = cb->args[IPSET_CB_ARG0];

@@ -214,7 +223,7 @@ mtype_list(const struct ip_set *set,
if (!test_bit(id, map->members) ||
(SET_WITH_TIMEOUT(set) &&
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
mtype_is_filled((const struct mtype_elem *) x) &&
mtype_is_filled((const struct mtype_elem *)x) &&
#endif
ip_set_timeout_expired(ext_timeout(x, set))))
continue;

@@ -222,14 +231,16 @@ mtype_list(const struct ip_set *set,
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, adt);
return -EMSGSIZE;
} else
goto nla_put_failure;
ret = -EMSGSIZE;
goto out;
}

goto nla_put_failure;
}
if (mtype_do_list(skb, map, id, set->dsize))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, x,
mtype_is_filled((const struct mtype_elem *) x)))
mtype_is_filled((const struct mtype_elem *)x)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}

@@ -238,29 +249,32 @@ mtype_list(const struct ip_set *set,
/* Set listing finished */
cb->args[IPSET_CB_ARG0] = 0;

return 0;
goto out;

nla_put_failure:
nla_nest_cancel(skb, nested);
if (unlikely(id == first)) {
cb->args[IPSET_CB_ARG0] = 0;
return -EMSGSIZE;
ret = -EMSGSIZE;
}
ipset_nest_end(skb, adt);
return 0;
out:
rcu_read_unlock();
return ret;
}

static void
mtype_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct ip_set *set = (struct ip_set *)ul_set;
struct mtype *map = set->data;
void *x;
u32 id;

/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
* but adding/deleting new entries is locked out
*/
spin_lock_bh(&set->lock);
for (id = 0; id < map->elements; id++)
if (mtype_gc_test(id, map, set->dsize)) {
x = get_ext(set, map, id);

@@ -269,7 +283,7 @@ mtype_gc(unsigned long ul_set)
ip_set_ext_destroy(set, x);
}
}
read_unlock_bh(&set->lock);
spin_unlock_bh(&set->lock);

map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);

@@ -59,7 +59,7 @@ struct bitmap_ip_adt_elem {
static inline u32
ip_to_id(const struct bitmap_ip *m, u32 ip)
{
return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip) / m->hosts;
}

/* Common functions */

@@ -81,7 +81,7 @@ static inline int
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
u32 flags, size_t dsize)
{
return !!test_and_set_bit(e->id, map->members);
return !!test_bit(e->id, map->members);
}

static inline int

@@ -138,18 +138,12 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret = 0;

if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;

ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;

@@ -181,8 +175,9 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
} else
} else {
ip_to = ip;
}

if (ip_to > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;

@@ -193,8 +188,8 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],

if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;

ret = 0;
}
return ret;
}

@@ -284,8 +279,9 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr);
} else
} else {
return -IPSET_ERR_PROTOCOL;
}

if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

@@ -382,6 +378,7 @@ bitmap_ip_init(void)
static void __exit
bitmap_ip_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&bitmap_ip_type);
}

@@ -90,7 +90,7 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
return 0;
elem = get_elem(map->extensions, e->id, dsize);
if (elem->filled == MAC_FILLED)
return e->ether == NULL ||
return !e->ether ||
ether_addr_equal(e->ether, elem->ether);
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;

@@ -131,7 +131,8 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
/* If MAC is unset yet, we store plain timeout value
* because the timer is not activated yet
* and we can reuse it later when MAC is filled out,
* possibly by the kernel */
* possibly by the kernel
*/
if (e->ether)
ip_set_timeout_set(timeout, t);
else

@@ -147,28 +148,35 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac_elem *elem;

elem = get_elem(map->extensions, e->id, dsize);
if (test_and_set_bit(e->id, map->members)) {
if (test_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) {
if (e->ether && (flags & IPSET_FLAG_EXIST))
memcpy(elem->ether, e->ether, ETH_ALEN);
if (e->ether &&
(flags & IPSET_FLAG_EXIST) &&
!ether_addr_equal(e->ether, elem->ether)) {
/* memcpy isn't atomic */
clear_bit(e->id, map->members);
smp_mb__after_atomic();
ether_addr_copy(elem->ether, e->ether);
}
return IPSET_ADD_FAILED;
} else if (!e->ether)
/* Already added without ethernet address */
return IPSET_ADD_FAILED;
/* Fill the MAC address and trigger the timer activation */
memcpy(elem->ether, e->ether, ETH_ALEN);
clear_bit(e->id, map->members);
smp_mb__after_atomic();
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
return IPSET_ADD_START_STORED_TIMEOUT;
} else if (e->ether) {
/* We can store MAC too */
memcpy(elem->ether, e->ether, ETH_ALEN);
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
return 0;
} else {
elem->filled = MAC_UNSET;
/* MAC is not stored yet, don't start timer */
return IPSET_ADD_STORE_PLAIN_TIMEOUT;
}
elem->filled = MAC_UNSET;
/* MAC is not stored yet, don't start timer */
return IPSET_ADD_STORE_PLAIN_TIMEOUT;
}

static inline int

@@ -239,18 +247,12 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
u32 ip = 0;
int ret = 0;

if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_IP]))
return -IPSET_ERR_PROTOCOL;

ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;

@@ -350,8 +352,9 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr);
} else
} else {
return -IPSET_ERR_PROTOCOL;
}

elements = (u64)last_ip - first_ip + 1;

@@ -419,6 +422,7 @@ bitmap_ipmac_init(void)
static void __exit
bitmap_ipmac_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&bitmap_ipmac_type);
}

@@ -73,7 +73,7 @@ static inline int
bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map, u32 flags, size_t dsize)
{
return !!test_and_set_bit(e->id, map->members);
return !!test_bit(e->id, map->members);
}

static inline int

@@ -136,19 +136,13 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
u16 port_to;
int ret = 0;

if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
return -IPSET_ERR_PROTOCOL;

port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;

@@ -168,8 +162,9 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
if (port < map->first_port)
return -IPSET_ERR_BITMAP_RANGE;
}
} else
} else {
port_to = port;
}

if (port_to > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;

@@ -180,8 +175,8 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],

if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;

ret = 0;
}
return ret;
}

@@ -312,6 +307,7 @@ bitmap_port_init(void)
static void __exit
bitmap_port_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&bitmap_port_type);
}

@ -32,8 +32,10 @@ static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
|
|||
struct ip_set_net {
|
||||
struct ip_set * __rcu *ip_set_list; /* all individual sets */
|
||||
ip_set_id_t ip_set_max; /* max number of sets */
|
||||
int is_deleted; /* deleted by ip_set_net_exit */
|
||||
bool is_deleted; /* deleted by ip_set_net_exit */
|
||||
bool is_destroyed; /* all sets are destroyed */
|
||||
};
|
||||
|
||||
static int ip_set_net_id __read_mostly;
|
||||
|
||||
static inline struct ip_set_net *ip_set_pernet(struct net *net)
|
||||
|
@ -59,8 +61,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
|
|||
#define ip_set(inst, id) \
|
||||
ip_set_dereference((inst)->ip_set_list)[id]
|
||||
|
||||
/*
|
||||
* The set types are implemented in modules and registered set types
|
||||
/* The set types are implemented in modules and registered set types
|
||||
* can be found in ip_set_type_list. Adding/deleting types is
|
||||
* serialized by ip_set_type_mutex.
|
||||
*/
|
||||
|
@ -130,7 +131,8 @@ __find_set_type_get(const char *name, u8 family, u8 revision,
|
|||
goto unlock;
|
||||
}
|
||||
/* Make sure the type is already loaded
|
||||
* but we don't support the revision */
|
||||
* but we don't support the revision
|
||||
*/
|
||||
list_for_each_entry_rcu(type, &ip_set_type_list, list)
|
||||
if (STRNCMP(type->name, name)) {
|
||||
err = -IPSET_ERR_FIND_TYPE;
|
||||
|
@ -208,15 +210,15 @@ ip_set_type_register(struct ip_set_type *type)
|
|||
pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
|
||||
type->name, family_name(type->family),
|
||||
type->revision_min);
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
ip_set_type_unlock();
|
||||
return -EINVAL;
|
||||
}
|
||||
list_add_rcu(&type->list, &ip_set_type_list);
|
||||
pr_debug("type %s, family %s, revision %u:%u registered.\n",
|
||||
type->name, family_name(type->family),
|
||||
type->revision_min, type->revision_max);
|
||||
unlock:
|
||||
ip_set_type_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_type_register);
|
||||
|
@ -230,12 +232,12 @@ ip_set_type_unregister(struct ip_set_type *type)
|
|||
pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
|
||||
type->name, family_name(type->family),
|
||||
type->revision_min);
|
||||
goto unlock;
|
||||
ip_set_type_unlock();
|
||||
return;
|
||||
}
|
||||
list_del_rcu(&type->list);
|
||||
pr_debug("type %s, family %s with revision min %u unregistered.\n",
|
||||
type->name, family_name(type->family), type->revision_min);
|
||||
unlock:
|
||||
ip_set_type_unlock();
|
||||
|
||||
synchronize_rcu();
|
||||
|
@ -289,7 +291,7 @@ static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
|
|||
int
|
||||
ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
|
||||
{
|
||||
struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
|
||||
struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
|
||||
|
||||
if (unlikely(!flag_nested(nla)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
@ -306,7 +308,7 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
|
|||
int
|
||||
ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
|
||||
{
|
||||
struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
|
||||
struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
|
||||
|
||||
if (unlikely(!flag_nested(nla)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
@ -317,7 +319,7 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
|
|||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
|
||||
sizeof(struct in6_addr));
|
||||
sizeof(struct in6_addr));
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
|
||||
|
@ -389,13 +391,22 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
|
|||
struct ip_set_ext *ext)
|
||||
{
|
||||
u64 fullmark;
|
||||
|
||||
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_TIMEOUT]) {
|
||||
if (!(set->extensions & IPSET_EXT_TIMEOUT))
|
||||
if (!SET_WITH_TIMEOUT(set))
|
||||
return -IPSET_ERR_TIMEOUT;
|
||||
ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
|
||||
}
|
||||
if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
|
||||
if (!(set->extensions & IPSET_EXT_COUNTER))
|
||||
if (!SET_WITH_COUNTER(set))
|
||||
return -IPSET_ERR_COUNTER;
|
||||
if (tb[IPSET_ATTR_BYTES])
|
||||
ext->bytes = be64_to_cpu(nla_get_be64(
|
||||
|
@ -405,25 +416,25 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
|
|||
tb[IPSET_ATTR_PACKETS]));
|
||||
}
|
||||
if (tb[IPSET_ATTR_COMMENT]) {
|
||||
if (!(set->extensions & IPSET_EXT_COMMENT))
|
||||
if (!SET_WITH_COMMENT(set))
|
||||
return -IPSET_ERR_COMMENT;
|
||||
ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
|
||||
}
|
||||
if (tb[IPSET_ATTR_SKBMARK]) {
|
||||
if (!(set->extensions & IPSET_EXT_SKBINFO))
|
||||
if (!SET_WITH_SKBINFO(set))
|
||||
return -IPSET_ERR_SKBINFO;
|
||||
fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
|
||||
ext->skbmark = fullmark >> 32;
|
||||
ext->skbmarkmask = fullmark & 0xffffffff;
|
||||
}
|
||||
if (tb[IPSET_ATTR_SKBPRIO]) {
|
||||
if (!(set->extensions & IPSET_EXT_SKBINFO))
|
||||
if (!SET_WITH_SKBINFO(set))
|
||||
return -IPSET_ERR_SKBINFO;
|
||||
ext->skbprio = be32_to_cpu(nla_get_be32(
|
||||
tb[IPSET_ATTR_SKBPRIO]));
|
||||
}
|
||||
if (tb[IPSET_ATTR_SKBQUEUE]) {
|
||||
if (!(set->extensions & IPSET_EXT_SKBINFO))
|
||||
if (!SET_WITH_SKBINFO(set))
|
||||
return -IPSET_ERR_SKBINFO;
|
||||
ext->skbqueue = be16_to_cpu(nla_get_be16(
|
||||
tb[IPSET_ATTR_SKBQUEUE]));
|
||||
|
@ -457,8 +468,7 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_put_extensions);
|
||||
|
||||
/*
|
||||
* Creating/destroying/renaming/swapping affect the existence and
|
||||
/* Creating/destroying/renaming/swapping affect the existence and
|
||||
* the properties of a set. All of these can be executed from userspace
|
||||
* only and serialized by the nfnl mutex indirectly from nfnetlink.
|
||||
*
|
||||
|
@ -485,8 +495,7 @@ __ip_set_put(struct ip_set *set)
|
|||
write_unlock_bh(&ip_set_ref_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add, del and test set entries from kernel.
|
||||
/* Add, del and test set entries from kernel.
|
||||
*
|
||||
* The set behind the index must exist and must be referenced
|
||||
* so it can't be destroyed (or changed) under our foot.
|
||||
|
@ -514,23 +523,23 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
|
|||
dev_net(par->in ? par->in : par->out), index);
|
||||
int ret = 0;
|
||||
|
||||
BUG_ON(set == NULL);
|
||||
BUG_ON(!set);
|
||||
pr_debug("set %s, index %u\n", set->name, index);
|
||||
|
||||
if (opt->dim < set->type->dimension ||
|
||||
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
|
||||
return 0;
|
||||
|
||||
read_lock_bh(&set->lock);
|
||||
rcu_read_lock_bh();
|
||||
ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
|
||||
read_unlock_bh(&set->lock);
|
||||
rcu_read_unlock_bh();
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
/* Type requests element to be completed */
|
||||
pr_debug("element must be completed, ADD is triggered\n");
|
||||
write_lock_bh(&set->lock);
|
||||
spin_lock_bh(&set->lock);
|
||||
set->variant->kadt(set, skb, par, IPSET_ADD, opt);
|
||||
write_unlock_bh(&set->lock);
|
||||
spin_unlock_bh(&set->lock);
|
||||
ret = 1;
|
||||
} else {
|
||||
/* --return-nomatch: invert matched element */
|
||||
|
@ -553,16 +562,16 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
|
|||
dev_net(par->in ? par->in : par->out), index);
|
||||
int ret;
|
||||
|
||||
BUG_ON(set == NULL);
|
||||
BUG_ON(!set);
|
||||
pr_debug("set %s, index %u\n", set->name, index);
|
||||
|
||||
if (opt->dim < set->type->dimension ||
|
||||
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
|
||||
return -IPSET_ERR_TYPE_MISMATCH;
|
||||
|
||||
write_lock_bh(&set->lock);
|
||||
spin_lock_bh(&set->lock);
|
||||
ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
|
||||
write_unlock_bh(&set->lock);
|
||||
spin_unlock_bh(&set->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -576,23 +585,22 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
|
|||
dev_net(par->in ? par->in : par->out), index);
|
||||
int ret = 0;
|
||||
|
||||
BUG_ON(set == NULL);
|
||||
BUG_ON(!set);
|
||||
pr_debug("set %s, index %u\n", set->name, index);
|
||||
|
||||
if (opt->dim < set->type->dimension ||
|
||||
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
|
||||
return -IPSET_ERR_TYPE_MISMATCH;
|
||||
|
||||
write_lock_bh(&set->lock);
|
||||
spin_lock_bh(&set->lock);
|
||||
ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
|
||||
write_unlock_bh(&set->lock);
|
||||
spin_unlock_bh(&set->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_del);
|
||||
|
||||
/*
|
||||
* Find set by name, reference it once. The reference makes sure the
|
||||
/* Find set by name, reference it once. The reference makes sure the
|
||||
* thing pointed to, does not go away under our feet.
|
||||
*
|
||||
*/
|
||||
|
@ -606,7 +614,7 @@ ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
|
|||
rcu_read_lock();
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
s = rcu_dereference(inst->ip_set_list)[i];
|
||||
if (s != NULL && STRNCMP(s->name, name)) {
|
||||
if (s && STRNCMP(s->name, name)) {
|
||||
__ip_set_get(s);
|
||||
index = i;
|
||||
*set = s;
|
||||
|
@ -619,8 +627,7 @@ ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_get_byname);
|
||||
|
||||
/*
|
||||
* If the given set pointer points to a valid set, decrement
|
||||
/* If the given set pointer points to a valid set, decrement
|
||||
* reference count by 1. The caller shall not assume the index
|
||||
* to be valid, after calling this function.
|
||||
*
|
||||
|
@ -633,7 +640,7 @@ __ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
|
|||
|
||||
rcu_read_lock();
|
||||
set = rcu_dereference(inst->ip_set_list)[index];
|
||||
if (set != NULL)
|
||||
if (set)
|
||||
__ip_set_put(set);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
@ -647,8 +654,7 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
|
||||
|
||||
/*
|
||||
* Get the name of a set behind a set index.
|
||||
/* Get the name of a set behind a set index.
|
||||
* We assume the set is referenced, so it does exist and
|
||||
* can't be destroyed. The set cannot be renamed due to
|
||||
* the referencing either.
|
||||
|
@ -659,7 +665,7 @@ ip_set_name_byindex(struct net *net, ip_set_id_t index)
|
|||
{
|
||||
const struct ip_set *set = ip_set_rcu_get(net, index);
|
||||
|
||||
BUG_ON(set == NULL);
|
||||
BUG_ON(!set);
|
||||
BUG_ON(set->ref == 0);
|
||||
|
||||
/* Referenced, so it's safe */
|
||||
|
@ -667,13 +673,11 @@ ip_set_name_byindex(struct net *net, ip_set_id_t index)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
|
||||
|
||||
/*
|
||||
* Routines to call by external subsystems, which do not
|
||||
/* Routines to call by external subsystems, which do not
|
||||
* call nfnl_lock for us.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Find set by index, reference it once. The reference makes sure the
|
||||
/* Find set by index, reference it once. The reference makes sure the
|
||||
* thing pointed to, does not go away under our feet.
|
||||
*
|
||||
* The nfnl mutex is used in the function.
|
||||
|
@ -699,8 +703,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
|
||||
|
||||
/*
|
||||
* If the given set pointer points to a valid set, decrement
|
||||
/* If the given set pointer points to a valid set, decrement
|
||||
* reference count by 1. The caller shall not assume the index
|
||||
* to be valid, after calling this function.
|
||||
*
|
||||
|
@ -715,15 +718,14 @@ ip_set_nfnl_put(struct net *net, ip_set_id_t index)
|
|||
nfnl_lock(NFNL_SUBSYS_IPSET);
|
||||
if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
|
||||
set = ip_set(inst, index);
|
||||
if (set != NULL)
|
||||
if (set)
|
||||
__ip_set_put(set);
|
||||
}
|
||||
nfnl_unlock(NFNL_SUBSYS_IPSET);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
|
||||
|
||||
/*
|
||||
* Communication protocol with userspace over netlink.
|
||||
/* Communication protocol with userspace over netlink.
|
||||
*
|
||||
* The commands are serialized by the nfnl mutex.
|
||||
*/
|
||||
|
@ -750,7 +752,7 @@ start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
|
|||
|
||||
nlh = nlmsg_put(skb, portid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
|
||||
sizeof(*nfmsg), flags);
|
||||
if (nlh == NULL)
|
||||
if (!nlh)
|
||||
return NULL;
|
||||
|
||||
nfmsg = nlmsg_data(nlh);
|
||||
|
@ -783,7 +785,7 @@ find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
|
|||
*id = IPSET_INVALID_ID;
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
set = ip_set(inst, i);
|
||||
if (set != NULL && STRNCMP(set->name, name)) {
|
||||
if (set && STRNCMP(set->name, name)) {
|
||||
*id = i;
|
||||
break;
|
||||
}
|
||||
|
@ -809,7 +811,7 @@ find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
|
|||
*index = IPSET_INVALID_ID;
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
s = ip_set(inst, i);
|
||||
if (s == NULL) {
|
||||
if (!s) {
|
||||
if (*index == IPSET_INVALID_ID)
|
||||
*index = i;
|
||||
} else if (STRNCMP(name, s->name)) {
|
||||
|
@ -841,18 +843,18 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
|
|||
struct ip_set_net *inst = ip_set_pernet(net);
|
||||
struct ip_set *set, *clash = NULL;
|
||||
ip_set_id_t index = IPSET_INVALID_ID;
|
||||
struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
|
||||
struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
|
||||
const char *name, *typename;
|
||||
u8 family, revision;
|
||||
u32 flags = flag_exist(nlh);
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL ||
|
||||
attr[IPSET_ATTR_TYPENAME] == NULL ||
|
||||
attr[IPSET_ATTR_REVISION] == NULL ||
|
||||
attr[IPSET_ATTR_FAMILY] == NULL ||
|
||||
(attr[IPSET_ATTR_DATA] != NULL &&
|
||||
!attr[IPSET_ATTR_SETNAME] ||
|
||||
!attr[IPSET_ATTR_TYPENAME] ||
|
||||
!attr[IPSET_ATTR_REVISION] ||
|
||||
!attr[IPSET_ATTR_FAMILY] ||
|
||||
(attr[IPSET_ATTR_DATA] &&
|
||||
!flag_nested(attr[IPSET_ATTR_DATA]))))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
|
@ -863,33 +865,29 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
|
|||
pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
|
||||
name, typename, family_name(family), revision);
|
||||
|
||||
/*
|
||||
* First, and without any locks, allocate and initialize
|
||||
/* First, and without any locks, allocate and initialize
|
||||
* a normal base set structure.
|
||||
*/
|
||||
set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
|
||||
set = kzalloc(sizeof(*set), GFP_KERNEL);
|
||||
if (!set)
|
||||
return -ENOMEM;
|
||||
rwlock_init(&set->lock);
|
||||
spin_lock_init(&set->lock);
|
||||
strlcpy(set->name, name, IPSET_MAXNAMELEN);
|
||||
set->family = family;
|
||||
set->revision = revision;
|
||||
|
||||
/*
|
||||
* Next, check that we know the type, and take
|
||||
/* Next, check that we know the type, and take
|
||||
* a reference on the type, to make sure it stays available
|
||||
* while constructing our new set.
|
||||
*
|
||||
* After referencing the type, we try to create the type
|
||||
* specific part of the set without holding any locks.
|
||||
*/
|
||||
ret = find_set_type_get(typename, family, revision, &(set->type));
|
||||
ret = find_set_type_get(typename, family, revision, &set->type);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Without holding any locks, create private part.
|
||||
*/
|
||||
/* Without holding any locks, create private part. */
|
||||
if (attr[IPSET_ATTR_DATA] &&
|
||||
nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
|
||||
set->type->create_policy)) {
|
||||
|
@ -903,8 +901,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
|
|||
|
||||
/* BTW, ret==0 here. */
|
||||
|
||||
/*
|
||||
* Here, we have a valid, constructed set and we are protected
|
||||
/* Here, we have a valid, constructed set and we are protected
|
||||
* by the nfnl mutex. Find the first free index in ip_set_list
|
||||
* and check clashing.
|
||||
*/
|
||||
|
@ -927,7 +924,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
|
|||
/* Wraparound */
|
||||
goto cleanup;
|
||||
|
||||
list = kzalloc(sizeof(struct ip_set *) * i, GFP_KERNEL);
|
||||
list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
|
||||
if (!list)
|
||||
goto cleanup;
|
||||
/* nfnl mutex is held, both lists are valid */
|
||||
|
@ -941,12 +938,11 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
|
|||
inst->ip_set_max = i;
|
||||
kfree(tmp);
|
||||
ret = 0;
|
||||
} else if (ret)
|
||||
} else if (ret) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Finally! Add our shiny new set to the list, and be done.
|
||||
*/
|
||||
/* Finally! Add our shiny new set to the list, and be done. */
|
||||
pr_debug("create: '%s' created with index %u!\n", set->name, index);
|
||||
ip_set(inst, index) = set;
|
||||
|
||||
|
@ -971,12 +967,9 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
|
|||
};
|
||||
|
||||
static void
|
||||
ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
|
||||
ip_set_destroy_set(struct ip_set *set)
|
||||
{
|
||||
struct ip_set *set = ip_set(inst, index);
|
||||
|
||||
pr_debug("set: %s\n", set->name);
|
||||
ip_set(inst, index) = NULL;
|
||||
|
||||
/* Must call it without holding any lock */
|
||||
set->variant->destroy(set);
|
||||
|
@ -1011,30 +1004,36 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
|
|||
if (!attr[IPSET_ATTR_SETNAME]) {
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
s = ip_set(inst, i);
|
||||
if (s != NULL && s->ref) {
|
||||
if (s && s->ref) {
|
||||
ret = -IPSET_ERR_BUSY;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
inst->is_destroyed = true;
|
||||
read_unlock_bh(&ip_set_ref_lock);
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
s = ip_set(inst, i);
|
||||
if (s != NULL)
|
||||
ip_set_destroy_set(inst, i);
|
||||
if (s) {
|
||||
ip_set(inst, i) = NULL;
|
||||
ip_set_destroy_set(s);
|
||||
}
|
||||
}
|
||||
/* Modified by ip_set_destroy() only, which is serialized */
|
||||
inst->is_destroyed = false;
|
||||
} else {
|
||||
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
|
||||
&i);
|
||||
if (s == NULL) {
|
||||
if (!s) {
|
||||
ret = -ENOENT;
|
||||
goto out;
|
||||
} else if (s->ref) {
|
||||
ret = -IPSET_ERR_BUSY;
|
||||
goto out;
|
||||
}
|
||||
ip_set(inst, i) = NULL;
|
||||
read_unlock_bh(&ip_set_ref_lock);
|
||||
|
||||
ip_set_destroy_set(inst, i);
|
||||
ip_set_destroy_set(s);
|
||||
}
|
||||
return 0;
|
||||
out:
|
||||
|
@ -1049,9 +1048,9 @@ ip_set_flush_set(struct ip_set *set)
|
|||
{
|
||||
pr_debug("set: %s\n", set->name);
|
||||
|
||||
write_lock_bh(&set->lock);
|
||||
spin_lock_bh(&set->lock);
|
||||
set->variant->flush(set);
|
||||
write_unlock_bh(&set->lock);
|
||||
spin_unlock_bh(&set->lock);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1069,12 +1068,12 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
|
|||
if (!attr[IPSET_ATTR_SETNAME]) {
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
s = ip_set(inst, i);
|
||||
if (s != NULL)
|
||||
if (s)
|
||||
ip_set_flush_set(s);
|
||||
}
|
||||
} else {
|
||||
s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
|
||||
if (s == NULL)
|
||||
if (!s)
|
||||
return -ENOENT;
|
||||
|
||||
ip_set_flush_set(s);
|
||||
|
@ -1106,12 +1105,12 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
|
|||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL ||
|
||||
attr[IPSET_ATTR_SETNAME2] == NULL))
|
||||
!attr[IPSET_ATTR_SETNAME] ||
|
||||
!attr[IPSET_ATTR_SETNAME2]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
|
||||
if (set == NULL)
|
||||
if (!set)
|
||||
return -ENOENT;
|
||||
|
||||
read_lock_bh(&ip_set_ref_lock);
|
||||
|
@ -1123,7 +1122,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
|
|||
name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
|
||||
for (i = 0; i < inst->ip_set_max; i++) {
|
||||
s = ip_set(inst, i);
|
||||
if (s != NULL && STRNCMP(s->name, name2)) {
|
||||
if (s && STRNCMP(s->name, name2)) {
|
||||
ret = -IPSET_ERR_EXIST_SETNAME2;
|
||||
goto out;
|
||||
}
|
||||
|
@ -1155,23 +1154,24 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
|
|||
char from_name[IPSET_MAXNAMELEN];
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL ||
|
||||
attr[IPSET_ATTR_SETNAME2] == NULL))
|
||||
!attr[IPSET_ATTR_SETNAME] ||
|
||||
!attr[IPSET_ATTR_SETNAME2]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
|
||||
&from_id);
|
||||
if (from == NULL)
|
||||
if (!from)
|
||||
return -ENOENT;
|
||||
|
||||
to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
|
||||
&to_id);
|
||||
if (to == NULL)
|
||||
if (!to)
|
||||
return -IPSET_ERR_EXIST_SETNAME2;
|
||||
|
||||
/* Features must not change.
|
||||
* Not an artificial restriction anymore, as we must prevent
|
||||
* possible loops created by swapping in setlist type of sets. */
|
||||
* Not an artifical restriction anymore, as we must prevent
|
||||
* possible loops created by swapping in setlist type of sets.
|
||||
*/
|
||||
if (!(from->type->features == to->type->features &&
|
||||
from->family == to->family))
|
||||
return -IPSET_ERR_TYPE_MISMATCH;
|
||||
|
@ -1202,12 +1202,16 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
|
|||
static int
|
||||
ip_set_dump_done(struct netlink_callback *cb)
|
||||
{
|
||||
struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET];
|
||||
if (cb->args[IPSET_CB_ARG0]) {
|
||||
pr_debug("release set %s\n",
|
||||
ip_set(inst, cb->args[IPSET_CB_INDEX])->name);
|
||||
__ip_set_put_byindex(inst,
|
||||
(ip_set_id_t) cb->args[IPSET_CB_INDEX]);
|
||||
struct ip_set_net *inst =
|
||||
(struct ip_set_net *)cb->args[IPSET_CB_NET];
|
||||
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
|
||||
struct ip_set *set = ip_set(inst, index);
|
||||
|
||||
if (set->variant->uref)
|
||||
set->variant->uref(set, cb, false);
|
||||
pr_debug("release set %s\n", set->name);
|
||||
__ip_set_put_byindex(inst, index);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1229,7 +1233,7 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
|
|||
{
|
||||
struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
|
||||
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
|
||||
struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
|
||||
struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
|
||||
struct nlattr *attr = (void *)nlh + min_len;
|
||||
u32 dump_type;
|
||||
ip_set_id_t index;
|
||||
|
@ -1238,27 +1242,23 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
|
|||
nla_parse(cda, IPSET_ATTR_CMD_MAX,
|
||||
attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
|
||||
|
||||
/* cb->args[IPSET_CB_NET]: net namespace
|
||||
* [IPSET_CB_DUMP]: dump single set/all sets
|
||||
* [IPSET_CB_INDEX]: set index
|
||||
* [IPSET_CB_ARG0]: type specific
|
||||
*/
|
||||
|
||||
if (cda[IPSET_ATTR_SETNAME]) {
|
||||
struct ip_set *set;
|
||||
|
||||
set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
|
||||
&index);
|
||||
if (set == NULL)
|
||||
if (!set)
|
||||
return -ENOENT;
|
||||
|
||||
dump_type = DUMP_ONE;
|
||||
cb->args[IPSET_CB_INDEX] = index;
|
||||
} else
|
||||
} else {
|
||||
dump_type = DUMP_ALL;
|
||||
}
|
||||
|
||||
if (cda[IPSET_ATTR_FLAGS]) {
|
||||
u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
|
||||
|
||||
dump_type |= (f << 16);
|
||||
}
|
||||
cb->args[IPSET_CB_NET] = (unsigned long)inst;
|
||||
|
@ -1276,6 +1276,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
|
||||
struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
|
||||
u32 dump_type, dump_flags;
|
||||
bool is_destroyed;
|
||||
int ret = 0;
|
||||
|
||||
if (!cb->args[IPSET_CB_DUMP]) {
|
||||
|
@ -1283,7 +1284,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
if (ret < 0) {
|
||||
nlh = nlmsg_hdr(cb->skb);
|
||||
/* We have to create and send the error message
|
||||
* manually :-( */
|
||||
* manually :-(
|
||||
*/
|
||||
if (nlh->nlmsg_flags & NLM_F_ACK)
|
||||
netlink_ack(cb->skb, nlh, ret);
|
||||
return ret;
|
||||
|
@ -1301,13 +1303,21 @@ dump_last:
|
|||
pr_debug("dump type, flag: %u %u index: %ld\n",
|
||||
dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
|
||||
for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
|
||||
index = (ip_set_id_t) cb->args[IPSET_CB_INDEX];
|
||||
index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
|
||||
write_lock_bh(&ip_set_ref_lock);
|
||||
set = ip_set(inst, index);
|
||||
if (set == NULL) {
|
||||
is_destroyed = inst->is_destroyed;
|
||||
if (!set || is_destroyed) {
|
||||
write_unlock_bh(&ip_set_ref_lock);
|
||||
if (dump_type == DUMP_ONE) {
|
||||
ret = -ENOENT;
|
||||
goto out;
|
||||
}
|
||||
if (is_destroyed) {
|
||||
/* All sets are just being destroyed */
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
/* When dumping all sets, we must dump "sorted"
|
||||
|
@ -1315,14 +1325,17 @@ dump_last:
|
|||
*/
|
||||
if (dump_type != DUMP_ONE &&
|
||||
((dump_type == DUMP_ALL) ==
|
||||
!!(set->type->features & IPSET_DUMP_LAST)))
|
||||
!!(set->type->features & IPSET_DUMP_LAST))) {
|
||||
write_unlock_bh(&ip_set_ref_lock);
|
||||
continue;
|
||||
}
|
||||
pr_debug("List set: %s\n", set->name);
|
||||
if (!cb->args[IPSET_CB_ARG0]) {
|
||||
/* Start listing: make sure set won't be destroyed */
|
||||
pr_debug("reference set\n");
|
||||
__ip_set_get(set);
|
||||
set->ref++;
|
||||
}
|
||||
write_unlock_bh(&ip_set_ref_lock);
|
||||
nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
|
||||
cb->nlh->nlmsg_seq, flags,
|
||||
IPSET_CMD_LIST);
|
||||
|
@ -1350,11 +1363,13 @@ dump_last:
|
|||
goto release_refcount;
|
||||
if (dump_flags & IPSET_FLAG_LIST_HEADER)
|
||||
goto next_set;
|
||||
if (set->variant->uref)
|
||||
set->variant->uref(set, cb, true);
|
||||
/* Fall through and add elements */
|
||||
default:
|
||||
read_lock_bh(&set->lock);
|
||||
rcu_read_lock_bh();
|
||||
ret = set->variant->list(set, skb, cb);
|
||||
read_unlock_bh(&set->lock);
|
||||
rcu_read_unlock_bh();
|
||||
if (!cb->args[IPSET_CB_ARG0])
|
||||
/* Set is done, proceed with next one */
|
||||
goto next_set;
|
||||
|
@ -1366,6 +1381,8 @@ dump_last:
|
|||
dump_type = DUMP_LAST;
|
||||
cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
|
||||
cb->args[IPSET_CB_INDEX] = 0;
|
||||
if (set && set->variant->uref)
|
||||
set->variant->uref(set, cb, false);
|
||||
goto dump_last;
|
||||
}
|
||||
goto out;
|
||||
|
@ -1380,7 +1397,10 @@ next_set:
|
|||
release_refcount:
|
||||
/* If there was an error or set is done, release set */
|
||||
if (ret || !cb->args[IPSET_CB_ARG0]) {
|
||||
pr_debug("release set %s\n", ip_set(inst, index)->name);
|
||||
set = ip_set(inst, index);
|
||||
if (set->variant->uref)
|
||||
set->variant->uref(set, cb, false);
|
||||
pr_debug("release set %s\n", set->name);
|
||||
__ip_set_put_byindex(inst, index);
|
||||
cb->args[IPSET_CB_ARG0] = 0;
|
||||
}
|
||||
|
@ -1432,9 +1452,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
|
|||
bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
|
||||
|
||||
do {
|
||||
write_lock_bh(&set->lock);
|
||||
spin_lock_bh(&set->lock);
|
||||
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
|
||||
write_unlock_bh(&set->lock);
|
||||
spin_unlock_bh(&set->lock);
|
||||
retried = true;
|
||||
} while (ret == -EAGAIN &&
|
||||
set->variant->resize &&
|
||||
|
@ -1450,12 +1470,12 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
|
|||
size_t payload = min(SIZE_MAX,
|
||||
sizeof(*errmsg) + nlmsg_len(nlh));
|
||||
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
|
||||
struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
|
||||
struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
|
||||
struct nlattr *cmdattr;
|
||||
u32 *errline;
|
||||
|
||||
skb2 = nlmsg_new(payload, GFP_KERNEL);
|
||||
if (skb2 == NULL)
|
||||
if (!skb2)
|
||||
return -ENOMEM;
|
||||
rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
|
||||
nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
|
||||
|
@ -1472,7 +1492,8 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
|
|||
|
||||
*errline = lineno;
|
||||
|
||||
netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
|
||||
netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
|
||||
MSG_DONTWAIT);
|
||||
/* Signal netlink not to send its ACK/errmsg. */
|
||||
return -EINTR;
|
||||
}
|
||||
|
@ -1487,25 +1508,25 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
|
|||
{
|
||||
struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
|
||||
struct ip_set *set;
|
||||
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
|
||||
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
|
||||
const struct nlattr *nla;
|
||||
u32 flags = flag_exist(nlh);
|
||||
bool use_lineno;
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL ||
|
||||
!attr[IPSET_ATTR_SETNAME] ||
|
||||
!((attr[IPSET_ATTR_DATA] != NULL) ^
|
||||
(attr[IPSET_ATTR_ADT] != NULL)) ||
|
||||
(attr[IPSET_ATTR_DATA] != NULL &&
|
||||
(attr[IPSET_ATTR_DATA] &&
|
||||
!flag_nested(attr[IPSET_ATTR_DATA])) ||
|
||||
(attr[IPSET_ATTR_ADT] != NULL &&
|
||||
(attr[IPSET_ATTR_ADT] &&
|
||||
(!flag_nested(attr[IPSET_ATTR_ADT]) ||
|
||||
attr[IPSET_ATTR_LINENO] == NULL))))
|
||||
!attr[IPSET_ATTR_LINENO]))))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
|
||||
if (set == NULL)
|
||||
if (!set)
|
||||
return -ENOENT;
|
||||
|
||||
use_lineno = !!attr[IPSET_ATTR_LINENO];
|
||||
|
@ -1542,25 +1563,25 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
|
|||
{
|
||||
struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
|
||||
struct ip_set *set;
|
||||
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
|
||||
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
|
||||
const struct nlattr *nla;
|
||||
u32 flags = flag_exist(nlh);
|
||||
bool use_lineno;
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL ||
|
||||
!attr[IPSET_ATTR_SETNAME] ||
|
||||
!((attr[IPSET_ATTR_DATA] != NULL) ^
|
||||
(attr[IPSET_ATTR_ADT] != NULL)) ||
|
||||
(attr[IPSET_ATTR_DATA] != NULL &&
|
||||
(attr[IPSET_ATTR_DATA] &&
|
||||
!flag_nested(attr[IPSET_ATTR_DATA])) ||
|
||||
(attr[IPSET_ATTR_ADT] != NULL &&
|
||||
(attr[IPSET_ATTR_ADT] &&
|
||||
(!flag_nested(attr[IPSET_ATTR_ADT]) ||
|
||||
attr[IPSET_ATTR_LINENO] == NULL))))
|
||||
!attr[IPSET_ATTR_LINENO]))))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
|
||||
if (set == NULL)
|
||||
if (!set)
|
||||
return -ENOENT;
|
||||
|
||||
use_lineno = !!attr[IPSET_ATTR_LINENO];
|
||||
|
@ -1597,26 +1618,26 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
|
|||
{
|
||||
struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
|
||||
struct ip_set *set;
|
||||
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
|
||||
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL ||
|
||||
attr[IPSET_ATTR_DATA] == NULL ||
|
||||
!attr[IPSET_ATTR_SETNAME] ||
|
||||
!attr[IPSET_ATTR_DATA] ||
|
||||
!flag_nested(attr[IPSET_ATTR_DATA])))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
|
||||
if (set == NULL)
|
||||
if (!set)
|
||||
return -ENOENT;
|
||||
|
||||
if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
|
||||
set->type->adt_policy))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
read_lock_bh(&set->lock);
|
||||
rcu_read_lock_bh();
|
||||
ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
|
||||
read_unlock_bh(&set->lock);
|
||||
rcu_read_unlock_bh();
|
||||
/* Userspace can't trigger element to be re-added */
|
||||
if (ret == -EAGAIN)
|
||||
ret = 1;
|
||||
|
@ -1638,15 +1659,15 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
|
|||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_SETNAME] == NULL))
|
||||
!attr[IPSET_ATTR_SETNAME]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
|
||||
if (set == NULL)
|
||||
if (!set)
|
||||
return -ENOENT;
|
||||
|
||||
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
||||
if (skb2 == NULL)
|
||||
if (!skb2)
|
||||
return -ENOMEM;
|
||||
|
||||
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
|
||||
|
@ -1695,8 +1716,8 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
|
|||
int ret = 0;
|
||||
|
||||
if (unlikely(protocol_failed(attr) ||
|
||||
attr[IPSET_ATTR_TYPENAME] == NULL ||
|
||||
attr[IPSET_ATTR_FAMILY] == NULL))
|
||||
!attr[IPSET_ATTR_TYPENAME] ||
|
||||
!attr[IPSET_ATTR_FAMILY]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
|
||||
|
@ -1706,7 +1727,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
|
|||
return ret;
|
||||
|
||||
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
||||
if (skb2 == NULL)
|
||||
if (!skb2)
|
||||
return -ENOMEM;
|
||||
|
||||
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
|
||||
|
@ -1751,11 +1772,11 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
|
|||
struct nlmsghdr *nlh2;
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
|
||||
if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
||||
if (skb2 == NULL)
|
||||
if (!skb2)
|
||||
return -ENOMEM;
|
||||
|
||||
nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
|
||||
|
@ -1883,7 +1904,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
|
|||
ret = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
op = (unsigned int *) data;
|
||||
op = (unsigned int *)data;
|
||||
|
||||
if (*op < IP_SET_OP_VERSION) {
|
||||
/* Check the version at the beginning of operations */
|
||||
|
@ -1995,10 +2016,11 @@ ip_set_net_init(struct net *net)
	if (inst->ip_set_max >= IPSET_INVALID_ID)
		inst->ip_set_max = IPSET_INVALID_ID - 1;

	list = kzalloc(sizeof(struct ip_set *) * inst->ip_set_max, GFP_KERNEL);
	list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
	if (!list)
		return -ENOMEM;
	inst->is_deleted = 0;
	inst->is_deleted = false;
	inst->is_destroyed = false;
	rcu_assign_pointer(inst->ip_set_list, list);
	return 0;
}

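The allocation change above replaces an open-coded size multiplication with kcalloc(), which fails cleanly on multiplication overflow and zero-fills the array just like kzalloc(). A minimal illustration (the helper and its argument are placeholders, not the real ip_set_max handling):

/* Placeholder helper to contrast the two allocation styles. */
#include <linux/slab.h>

struct ip_set;	/* opaque here; only the pointer size matters */

static struct ip_set **demo_alloc_set_array(unsigned int nr_sets)
{
	/* Open-coded form, as removed above:
	 *	kzalloc(sizeof(struct ip_set *) * nr_sets, GFP_KERNEL);
	 * The multiplication can silently overflow for huge nr_sets.
	 * kcalloc() performs the same zeroed allocation but returns NULL
	 * if nr_sets * sizeof(struct ip_set *) would overflow.
	 */
	return kcalloc(nr_sets, sizeof(struct ip_set *), GFP_KERNEL);
}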
@ -2011,12 +2033,14 @@ ip_set_net_exit(struct net *net)
	struct ip_set *set = NULL;
	ip_set_id_t i;

	inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
	inst->is_deleted = true; /* flag for ip_set_nfnl_put */

	for (i = 0; i < inst->ip_set_max; i++) {
		set = ip_set(inst, i);
		if (set != NULL)
			ip_set_destroy_set(inst, i);
		if (set) {
			ip_set(inst, i) = NULL;
			ip_set_destroy_set(set);
		}
	}
	kfree(rcu_dereference_protected(inst->ip_set_list, 1));
}

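Note the new ordering in the loop above: the ip_set_list slot is reset to NULL before ip_set_destroy_set() runs, so a reader walking the array under RCU can no longer fetch a set whose teardown has already started. A reduced sketch of that unpublish-then-destroy ordering, with illustrative names:

/* Sketch of the unpublish-then-destroy ordering; all "demo_*" names are
 * illustrative stand-ins, not the ipset symbols.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_set {
	char name[32];
	struct rcu_head rcu;
};

struct demo_net {
	struct demo_set __rcu **slots;
	unsigned int nr_slots;
};

static void demo_net_exit(struct demo_net *inst)
{
	unsigned int i;

	for (i = 0; i < inst->nr_slots; i++) {
		struct demo_set *set =
			rcu_dereference_protected(inst->slots[i], 1);

		if (!set)
			continue;
		/* Unpublish first: new RCU readers now see NULL ... */
		rcu_assign_pointer(inst->slots[i], NULL);
		/* ... then free after a grace period, so readers that
		 * already fetched the old pointer can finish safely.
		 */
		kfree_rcu(set, rcu);
	}
}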
@ -2028,11 +2052,11 @@ static struct pernet_operations ip_set_net_ops = {
	.size	= sizeof(struct ip_set_net)
};


static int __init
ip_set_init(void)
{
	int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);

	if (ret != 0) {
		pr_err("ip_set: cannot register with nfnetlink.\n");
		return ret;

@ -30,7 +30,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
		const struct tcphdr *th;

		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
		if (th == NULL)
		if (!th)
			/* No choice either */
			return false;

@ -42,7 +42,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
		const sctp_sctphdr_t *sh;

		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
		if (sh == NULL)
		if (!sh)
			/* No choice either */
			return false;

@ -55,7 +55,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
		const struct udphdr *uh;

		uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
		if (uh == NULL)
		if (!uh)
			/* No choice either */
			return false;

@ -67,7 +67,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
		const struct icmphdr *ic;

		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
		if (ic == NULL)
		if (!ic)
			return false;

		*port = (__force __be16)htons((ic->type << 8) | ic->code);

@ -78,7 +78,7 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
		const struct icmp6hdr *ic;

		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
		if (ic == NULL)
		if (!ic)
			return false;

		*port = (__force __be16)

@ -116,7 +116,8 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
		return false;
	default:
		/* Other protocols doesn't have ports,
		   so we can match fragments */
		 * so we can match fragments.
		 */
		*proto = protocol;
		return true;
	}
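Every protocol branch above goes through skb_header_pointer(), which returns a pointer to the requested header, copying it into the caller's stack buffer when the skb data is not linear, and returns NULL when the packet is too short. A minimal sketch of that access pattern (the helper name is made up):

/* Sketch of the skb_header_pointer() pattern used by get_port();
 * demo_tcp_dport() is an illustrative helper, not ipset code.
 */
#include <linux/skbuff.h>
#include <linux/tcp.h>

static bool demo_tcp_dport(const struct sk_buff *skb, unsigned int thoff,
			   __be16 *port)
{
	struct tcphdr _tcph;
	const struct tcphdr *th;

	/* Copies into _tcph if the header is not in the linear area;
	 * NULL means the packet is truncated.
	 */
	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
	if (!th)
		return false;
	*port = th->dest;
	return true;
}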
File diff suppressed because it is too large
@ -108,18 +108,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
	u32 ip = 0, ip_to = 0, hosts;
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	if (unlikely(!tb[IPSET_ATTR_IP]))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
	if (ret)
		return ret;

@ -164,8 +158,8 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;

		ret = 0;
	}
	return ret;
}
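Two things happen in the hunks above, and they repeat across all the hash types in this patch set: IPSET_ATTR_LINENO is now read before the protocol checks, so a failing line of a batched restore can still be reported by number, and the long chain of extension-attribute checks disappears because those attributes are validated centrally when the extensions are fetched. A condensed, hypothetical illustration of the new ordering:

/* Condensed illustration only; demo_uadt() does not reproduce the real
 * hash_ip4_uadt() logic.
 */
#include <net/netlink.h>
#include <linux/netfilter/ipset/ip_set.h>

static int demo_uadt(struct nlattr *tb[], u32 *lineno)
{
	/* Record the restore-file line number first, so even a protocol
	 * error below is reported against the right line of the batch.
	 */
	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	/* Only the attributes this handler itself consumes are checked
	 * here; the extension attributes (timeout, counters, skbinfo)
	 * are validated when the extensions are read.
	 */
	if (unlikely(!tb[IPSET_ATTR_IP]))
		return -IPSET_ERR_PROTOCOL;

	return 0;
}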
@ -246,20 +240,20 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
		     tb[IPSET_ATTR_IP_TO] ||
		     tb[IPSET_ATTR_CIDR]))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	if (unlikely(!tb[IPSET_ATTR_IP]))
		return -IPSET_ERR_PROTOCOL;
	if (unlikely(tb[IPSET_ATTR_IP_TO]))
		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
	if (unlikely(tb[IPSET_ATTR_CIDR])) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr != HOST_MASK)
			return -IPSET_ERR_INVALID_CIDR;
	}

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
	if (ret)
		return ret;

@ -321,6 +315,7 @@ hash_ip_init(void)
static void __exit
hash_ip_fini(void)
{
	rcu_barrier();
	ip_set_type_unregister(&hash_ip_type);
}

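Each type module in this series gains an rcu_barrier() at the top of its exit routine, as in hash_ip_fini() above. rcu_barrier() waits for all already-queued call_rcu() callbacks to run, so element frees that are still in flight cannot execute after the type is unregistered and the module text goes away. A generic sketch of that exit ordering with illustrative names:

/* Generic module-exit ordering sketch; "demo_type" and
 * demo_type_unregister() are illustrative stand-ins.
 */
#include <linux/module.h>
#include <linux/rcupdate.h>

struct demo_type {
	const char *name;
};

static struct demo_type demo_type = {
	.name	= "demo",
};

static void demo_type_unregister(struct demo_type *t)
{
	(void)t;	/* stand-in for the real unregistration call */
}

static void __exit demo_fini(void)
{
	/* Wait for pending call_rcu() callbacks that may still free
	 * elements of this type ...
	 */
	rcu_barrier();
	/* ... only then drop the type and allow the module to unload. */
	demo_type_unregister(&demo_type);
}
module_exit(demo_fini);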
@ -108,19 +108,13 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
u32 ip, ip_to = 0;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -161,8 +155,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -212,7 +206,6 @@ hash_ipmark6_data_next(struct hash_ipmark4_elem *next,
|
|||
#define IP_SET_EMIT_CREATE
|
||||
#include "ip_set_hash_gen.h"
|
||||
|
||||
|
||||
static int
|
||||
hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
||||
const struct xt_action_param *par,
|
||||
|
@ -240,21 +233,21 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
|
||||
tb[IPSET_ATTR_IP_TO] ||
|
||||
tb[IPSET_ATTR_CIDR]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
if (unlikely(tb[IPSET_ATTR_CIDR])) {
|
||||
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
|
||||
if (cidr != HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -274,10 +267,8 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
ret = adtfn(set, &e, &ext, &ext, flags);
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ip_set_type hash_ipmark_type __read_mostly = {
|
||||
|
@ -325,6 +316,7 @@ hash_ipmark_init(void)
|
|||
static void __exit
|
||||
hash_ipmark_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_ipmark_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -116,20 +116,14 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
bool with_ports = false;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -146,8 +140,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (e.proto == 0)
|
||||
return -IPSET_ERR_INVALID_PROTO;
|
||||
} else
|
||||
} else {
|
||||
return -IPSET_ERR_MISSING_PROTO;
|
||||
}
|
||||
|
||||
if (!(with_ports || e.proto == IPPROTO_ICMP))
|
||||
e.port = 0;
|
||||
|
@ -193,8 +188,8 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
|
@ -279,22 +274,22 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
bool with_ports = false;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
|
||||
tb[IPSET_ATTR_IP_TO] ||
|
||||
tb[IPSET_ATTR_CIDR]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
if (unlikely(tb[IPSET_ATTR_CIDR])) {
|
||||
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
|
||||
if (cidr != HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -311,8 +306,9 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (e.proto == 0)
|
||||
return -IPSET_ERR_INVALID_PROTO;
|
||||
} else
|
||||
} else {
|
||||
return -IPSET_ERR_MISSING_PROTO;
|
||||
}
|
||||
|
||||
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
|
||||
e.port = 0;
|
||||
|
@ -335,8 +331,8 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -388,6 +384,7 @@ hash_ipport_init(void)
|
|||
static void __exit
|
||||
hash_ipport_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_ipport_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
|
|||
|
||||
static bool
|
||||
hash_ipportip4_data_list(struct sk_buff *skb,
|
||||
const struct hash_ipportip4_elem *data)
|
||||
const struct hash_ipportip4_elem *data)
|
||||
{
|
||||
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
|
||||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
|
||||
|
@ -119,20 +119,14 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
bool with_ports = false;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -153,8 +147,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (e.proto == 0)
|
||||
return -IPSET_ERR_INVALID_PROTO;
|
||||
} else
|
||||
} else {
|
||||
return -IPSET_ERR_MISSING_PROTO;
|
||||
}
|
||||
|
||||
if (!(with_ports || e.proto == IPPROTO_ICMP))
|
||||
e.port = 0;
|
||||
|
@ -200,8 +195,8 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
|
@ -290,22 +285,22 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
bool with_ports = false;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
|
||||
tb[IPSET_ATTR_IP_TO] ||
|
||||
tb[IPSET_ATTR_CIDR]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
if (unlikely(tb[IPSET_ATTR_CIDR])) {
|
||||
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
|
||||
if (cidr != HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -326,8 +321,9 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (e.proto == 0)
|
||||
return -IPSET_ERR_INVALID_PROTO;
|
||||
} else
|
||||
} else {
|
||||
return -IPSET_ERR_MISSING_PROTO;
|
||||
}
|
||||
|
||||
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
|
||||
e.port = 0;
|
||||
|
@ -350,8 +346,8 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -403,6 +399,7 @@ hash_ipportip_init(void)
|
|||
static void __exit
|
||||
hash_ipportip_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_ipportip_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -141,7 +141,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
const struct hash_ipportnet *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_ipportnet4_elem e = {
|
||||
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
|
||||
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
};
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
|
||||
|
@ -173,21 +173,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
u8 cidr;
|
||||
int ret;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -215,14 +209,16 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (e.proto == 0)
|
||||
return -IPSET_ERR_INVALID_PROTO;
|
||||
} else
|
||||
} else {
|
||||
return -IPSET_ERR_MISSING_PROTO;
|
||||
}
|
||||
|
||||
if (!(with_ports || e.proto == IPPROTO_ICMP))
|
||||
e.port = 0;
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
flags |= (IPSET_FLAG_NOMATCH << 16);
|
||||
}
|
||||
|
@ -269,8 +265,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
swap(ip2_from, ip2_to);
|
||||
if (ip2_from + UINT_MAX == ip2_to)
|
||||
return -IPSET_ERR_HASH_RANGE;
|
||||
} else
|
||||
} else {
|
||||
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
|
||||
}
|
||||
|
||||
if (retried)
|
||||
ip = ntohl(h->next.ip);
|
||||
|
@ -293,8 +290,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
ip2 = ip2_last + 1;
|
||||
}
|
||||
}
|
||||
|
@ -395,7 +392,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
const struct hash_ipportnet *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_ipportnet6_elem e = {
|
||||
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
|
||||
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
};
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
|
||||
|
@ -426,24 +423,22 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
u8 cidr;
|
||||
int ret;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
|
||||
tb[IPSET_ATTR_IP_TO] ||
|
||||
tb[IPSET_ATTR_CIDR]))
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
if (unlikely(tb[IPSET_ATTR_CIDR])) {
|
||||
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
if (cidr != HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
|
@ -474,14 +469,16 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (e.proto == 0)
|
||||
return -IPSET_ERR_INVALID_PROTO;
|
||||
} else
|
||||
} else {
|
||||
return -IPSET_ERR_MISSING_PROTO;
|
||||
}
|
||||
|
||||
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
|
||||
e.port = 0;
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
flags |= (IPSET_FLAG_NOMATCH << 16);
|
||||
}
|
||||
|
@ -505,8 +502,8 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -562,6 +559,7 @@ hash_ipportnet_init(void)
|
|||
static void __exit
|
||||
hash_ipportnet_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_ipportnet_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -89,10 +89,10 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
		return 0;

	if (skb_mac_header(skb) < skb->head ||
	   (skb_mac_header(skb) + ETH_HLEN) > skb->data)
	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
		return -EINVAL;

	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
	ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
	if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
		return -EINVAL;
	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
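The memcpy() above is replaced by ether_addr_copy(), the helper dedicated to 6-byte MAC addresses: it makes the ETH_ALEN size implicit and, on architectures with efficient unaligned access, compiles down to two fixed-size moves. It does expect both addresses to be at least 2-byte aligned. A tiny illustration with a placeholder element type:

/* Illustration of ether_addr_copy() as a memcpy(..., ETH_ALEN)
 * replacement; "demo_mac_elem" is a placeholder type.
 */
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

struct demo_mac_elem {
	unsigned char ether[ETH_ALEN];	/* 2-byte aligned within the struct */
};

static void demo_fill_source(struct demo_mac_elem *e,
			     const struct ethhdr *eth)
{
	/* Same effect as memcpy(e->ether, eth->h_source, ETH_ALEN),
	 * but with the MAC-address intent and size made explicit.
	 */
	ether_addr_copy(e->ether, eth->h_source);
}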
@ -107,22 +107,16 @@ hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_ETHER] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_ETHER]))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_extensions(set, tb, &ext);
|
||||
if (ret)
|
||||
return ret;
|
||||
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
|
||||
ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));
|
||||
if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
|
||||
return -IPSET_ERR_HASH_ELEM;
|
||||
|
||||
|
@ -171,6 +165,7 @@ hash_mac_init(void)
|
|||
static void __exit
|
||||
hash_mac_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_mac_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -120,7 +120,7 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
const struct hash_net *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_net4_elem e = {
|
||||
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
};
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
|
||||
|
@ -146,19 +146,13 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
u32 ip = 0, ip_to = 0, last;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -175,6 +169,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
flags |= (IPSET_FLAG_NOMATCH << 16);
|
||||
}
|
||||
|
@ -182,7 +177,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
|
||||
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
|
||||
ret = adtfn(set, &e, &ext, &ext, flags);
|
||||
return ip_set_enomatch(ret, flags, adt, set) ? -ret:
|
||||
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
|
||||
ip_set_eexist(ret, flags) ? 0 : ret;
|
||||
}
|
||||
|
||||
|
@ -204,8 +199,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
ret = adtfn(set, &e, &ext, &ext, flags);
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
ip = last + 1;
|
||||
}
|
||||
return ret;
|
||||
|
@ -294,7 +289,7 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
const struct hash_net *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_net6_elem e = {
|
||||
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
};
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
|
||||
|
@ -318,21 +313,15 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
int ret;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -341,16 +330,17 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (tb[IPSET_ATTR_CIDR])
|
||||
if (tb[IPSET_ATTR_CIDR]) {
|
||||
e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
|
||||
if (!e.cidr || e.cidr > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
if (!e.cidr || e.cidr > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ip6_netmask(&e.ip, e.cidr);
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
flags |= (IPSET_FLAG_NOMATCH << 16);
|
||||
}
|
||||
|
@ -404,6 +394,7 @@ hash_net_init(void)
|
|||
static void __exit
|
||||
hash_net_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_net_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
#include <linux/skbuff.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <net/netlink.h>
|
||||
|
@ -37,88 +36,13 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
|
|||
IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
|
||||
MODULE_ALIAS("ip_set_hash:net,iface");
|
||||
|
||||
/* Interface name rbtree */
|
||||
|
||||
struct iface_node {
|
||||
struct rb_node node;
|
||||
char iface[IFNAMSIZ];
|
||||
};
|
||||
|
||||
#define iface_data(n) (rb_entry(n, struct iface_node, node)->iface)
|
||||
|
||||
static void
|
||||
rbtree_destroy(struct rb_root *root)
|
||||
{
|
||||
struct iface_node *node, *next;
|
||||
|
||||
rbtree_postorder_for_each_entry_safe(node, next, root, node)
|
||||
kfree(node);
|
||||
|
||||
*root = RB_ROOT;
|
||||
}
|
||||
|
||||
static int
|
||||
iface_test(struct rb_root *root, const char **iface)
|
||||
{
|
||||
struct rb_node *n = root->rb_node;
|
||||
|
||||
while (n) {
|
||||
const char *d = iface_data(n);
|
||||
int res = strcmp(*iface, d);
|
||||
|
||||
if (res < 0)
|
||||
n = n->rb_left;
|
||||
else if (res > 0)
|
||||
n = n->rb_right;
|
||||
else {
|
||||
*iface = d;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
iface_add(struct rb_root *root, const char **iface)
|
||||
{
|
||||
struct rb_node **n = &(root->rb_node), *p = NULL;
|
||||
struct iface_node *d;
|
||||
|
||||
while (*n) {
|
||||
char *ifname = iface_data(*n);
|
||||
int res = strcmp(*iface, ifname);
|
||||
|
||||
p = *n;
|
||||
if (res < 0)
|
||||
n = &((*n)->rb_left);
|
||||
else if (res > 0)
|
||||
n = &((*n)->rb_right);
|
||||
else {
|
||||
*iface = ifname;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
d = kzalloc(sizeof(*d), GFP_ATOMIC);
|
||||
if (!d)
|
||||
return -ENOMEM;
|
||||
strcpy(d->iface, *iface);
|
||||
|
||||
rb_link_node(&d->node, p, n);
|
||||
rb_insert_color(&d->node, root);
|
||||
|
||||
*iface = d->iface;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Type specific function prefix */
|
||||
#define HTYPE hash_netiface
|
||||
#define IP_SET_HASH_WITH_NETS
|
||||
#define IP_SET_HASH_WITH_RBTREE
|
||||
#define IP_SET_HASH_WITH_MULTI
|
||||
#define IP_SET_HASH_WITH_NET0
|
||||
|
||||
#define STREQ(a, b) (strcmp(a, b) == 0)
|
||||
#define STRLCPY(a, b) strlcpy(a, b, IFNAMSIZ)
|
||||
|
||||
/* IPv4 variant */
|
||||
|
||||
|
@ -137,7 +61,7 @@ struct hash_netiface4_elem {
	u8 cidr;
	u8 nomatch;
	u8 elem;
	const char *iface;
	char iface[IFNAMSIZ];
};

/* Common functions */

@ -151,7 +75,7 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
	       ip1->cidr == ip2->cidr &&
	       (++*multi) &&
	       ip1->physdev == ip2->physdev &&
	       ip1->iface == ip2->iface;
	       strcmp(ip1->iface, ip2->iface) == 0;
}

static inline int
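This is the heart of the hash:net,iface rework visible in this file: the element no longer stores a pointer into a module-global rbtree of interned interface names (the rbtree code removed earlier in the diff), it embeds a fixed char iface[IFNAMSIZ] and compares with strcmp(). With no shared tree there is nothing left on the lookup path that needs its own locking, which is what lets this type be handled like the other RCU-converted hash types. A reduced sketch of the data-model change (struct names are illustrative):

/* Reduced sketch of the element change; "demo_elem_*" are illustrative. */
#include <linux/if.h>		/* IFNAMSIZ */
#include <linux/string.h>
#include <linux/types.h>

struct demo_elem_old {
	__be32 ip;
	const char *iface;	/* pointer interned in a shared rbtree */
};

struct demo_elem_new {
	__be32 ip;
	char iface[IFNAMSIZ];	/* name stored inline, no shared state */
};

static inline bool demo_equal(const struct demo_elem_new *a,
			      const struct demo_elem_new *b)
{
	/* Pointer identity is gone, so compare the embedded strings. */
	return a->ip == b->ip && strcmp(a->iface, b->iface) == 0;
}

Filling the element from userspace then becomes a plain nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ), and the packet path copies the device name with the STRLCPY() helper, as the later hunks in this file show.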
@ -219,7 +143,7 @@ static const char *get_physindev_name(const struct sk_buff *skb)
|
|||
return dev ? dev->name : NULL;
|
||||
}
|
||||
|
||||
static const char *get_phyoutdev_name(const struct sk_buff *skb)
|
||||
static const char *get_physoutdev_name(const struct sk_buff *skb)
|
||||
{
|
||||
struct net_device *dev = nf_bridge_get_physoutdev(skb);
|
||||
|
||||
|
@ -235,11 +159,10 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
struct hash_netiface *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netiface4_elem e = {
|
||||
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
.elem = 1,
|
||||
};
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
int ret;
|
||||
|
||||
if (e.cidr == 0)
|
||||
return -EINVAL;
|
||||
|
@ -249,35 +172,25 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
|
||||
e.ip &= ip_set_netmask(e.cidr);
|
||||
|
||||
#define IFACE(dir) (par->dir ? par->dir->name : NULL)
|
||||
#define IFACE(dir) (par->dir ? par->dir->name : "")
|
||||
#define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC)
|
||||
|
||||
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
|
||||
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
||||
e.iface = SRCDIR ? get_physindev_name(skb) :
|
||||
get_phyoutdev_name(skb);
|
||||
const char *eiface = SRCDIR ? get_physindev_name(skb) :
|
||||
get_physoutdev_name(skb);
|
||||
|
||||
if (!e.iface)
|
||||
if (!eiface)
|
||||
return -EINVAL;
|
||||
STRLCPY(e.iface, eiface);
|
||||
e.physdev = 1;
|
||||
#else
|
||||
e.iface = NULL;
|
||||
#endif
|
||||
} else
|
||||
e.iface = SRCDIR ? IFACE(in) : IFACE(out);
|
||||
} else {
|
||||
STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
|
||||
}
|
||||
|
||||
if (!e.iface)
|
||||
if (strlen(e.iface) == 0)
|
||||
return -EINVAL;
|
||||
ret = iface_test(&h->rbtree, &e.iface);
|
||||
if (adt == IPSET_ADD) {
|
||||
if (!ret) {
|
||||
ret = iface_add(&h->rbtree, &e.iface);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} else if (!ret)
|
||||
return ret;
|
||||
|
||||
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
|
||||
}
|
||||
|
||||
|
@ -290,23 +203,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
|
||||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
u32 ip = 0, ip_to = 0, last;
|
||||
char iface[IFNAMSIZ];
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!tb[IPSET_ATTR_IFACE] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!tb[IPSET_ATTR_IFACE] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -320,21 +226,11 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
if (e.cidr > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
|
||||
e.iface = iface;
|
||||
ret = iface_test(&h->rbtree, &e.iface);
|
||||
if (adt == IPSET_ADD) {
|
||||
if (!ret) {
|
||||
ret = iface_add(&h->rbtree, &e.iface);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} else if (!ret)
|
||||
return ret;
|
||||
nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_PHYSDEV)
|
||||
e.physdev = 1;
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
|
@ -355,8 +251,9 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
swap(ip, ip_to);
|
||||
if (ip + UINT_MAX == ip_to)
|
||||
return -IPSET_ERR_HASH_RANGE;
|
||||
} else
|
||||
} else {
|
||||
ip_set_mask_from_to(ip, ip_to, e.cidr);
|
||||
}
|
||||
|
||||
if (retried)
|
||||
ip = ntohl(h->next.ip);
|
||||
|
@ -367,8 +264,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
ip = last + 1;
|
||||
}
|
||||
return ret;
|
||||
|
@ -390,7 +287,7 @@ struct hash_netiface6_elem {
|
|||
u8 cidr;
|
||||
u8 nomatch;
|
||||
u8 elem;
|
||||
const char *iface;
|
||||
char iface[IFNAMSIZ];
|
||||
};
|
||||
|
||||
/* Common functions */
|
||||
|
@ -404,7 +301,7 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
|
|||
ip1->cidr == ip2->cidr &&
|
||||
(++*multi) &&
|
||||
ip1->physdev == ip2->physdev &&
|
||||
ip1->iface == ip2->iface;
|
||||
strcmp(ip1->iface, ip2->iface) == 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
@ -475,11 +372,10 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
struct hash_netiface *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netiface6_elem e = {
|
||||
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
|
||||
.elem = 1,
|
||||
};
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
int ret;
|
||||
|
||||
if (e.cidr == 0)
|
||||
return -EINVAL;
|
||||
|
@ -491,60 +387,43 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
|
||||
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
|
||||
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
||||
e.iface = SRCDIR ? get_physindev_name(skb) :
|
||||
get_phyoutdev_name(skb);
|
||||
if (!e.iface)
|
||||
const char *eiface = SRCDIR ? get_physindev_name(skb) :
|
||||
get_physoutdev_name(skb);
|
||||
|
||||
if (!eiface)
|
||||
return -EINVAL;
|
||||
|
||||
STRLCPY(e.iface, eiface);
|
||||
e.physdev = 1;
|
||||
#else
|
||||
e.iface = NULL;
|
||||
#endif
|
||||
} else
|
||||
e.iface = SRCDIR ? IFACE(in) : IFACE(out);
|
||||
} else {
|
||||
STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
|
||||
}
|
||||
|
||||
if (!e.iface)
|
||||
if (strlen(e.iface) == 0)
|
||||
return -EINVAL;
|
||||
ret = iface_test(&h->rbtree, &e.iface);
|
||||
if (adt == IPSET_ADD) {
|
||||
if (!ret) {
|
||||
ret = iface_add(&h->rbtree, &e.iface);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} else if (!ret)
|
||||
return ret;
|
||||
|
||||
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
|
||||
}
|
||||
|
||||
static int
|
||||
hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
|
||||
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
|
||||
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
|
||||
{
|
||||
struct hash_netiface *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
|
||||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
char iface[IFNAMSIZ];
|
||||
int ret;
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!tb[IPSET_ATTR_IFACE] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] ||
|
||||
!tb[IPSET_ATTR_IFACE] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -553,26 +432,19 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (tb[IPSET_ATTR_CIDR])
|
||||
if (tb[IPSET_ATTR_CIDR]) {
|
||||
e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
if (e.cidr > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
if (e.cidr > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ip6_netmask(&e.ip, e.cidr);
|
||||
|
||||
strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
|
||||
e.iface = iface;
|
||||
ret = iface_test(&h->rbtree, &e.iface);
|
||||
if (adt == IPSET_ADD) {
|
||||
if (!ret) {
|
||||
ret = iface_add(&h->rbtree, &e.iface);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} else if (!ret)
|
||||
return ret;
|
||||
nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_PHYSDEV)
|
||||
e.physdev = 1;
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
|
@ -633,6 +505,7 @@ hash_netiface_init(void)
|
|||
static void __exit
|
||||
hash_netiface_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_netiface_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -57,8 +57,8 @@ struct hash_netnet4_elem {
|
|||
|
||||
static inline bool
|
||||
hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
|
||||
const struct hash_netnet4_elem *ip2,
|
||||
u32 *multi)
|
||||
const struct hash_netnet4_elem *ip2,
|
||||
u32 *multi)
|
||||
{
|
||||
return ip1->ipcmp == ip2->ipcmp &&
|
||||
ip1->ccmp == ip2->ccmp;
|
||||
|
@ -84,7 +84,7 @@ hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
|
|||
|
||||
static inline void
|
||||
hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
|
||||
struct hash_netnet4_elem *orig)
|
||||
struct hash_netnet4_elem *orig)
|
||||
{
|
||||
elem->ip[1] = orig->ip[1];
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
|
|||
|
||||
static bool
|
||||
hash_netnet4_data_list(struct sk_buff *skb,
|
||||
const struct hash_netnet4_elem *data)
|
||||
const struct hash_netnet4_elem *data)
|
||||
{
|
||||
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
|
||||
|
||||
|
@ -122,7 +122,7 @@ nla_put_failure:
|
|||
|
||||
static inline void
|
||||
hash_netnet4_data_next(struct hash_netnet4_elem *next,
|
||||
const struct hash_netnet4_elem *d)
|
||||
const struct hash_netnet4_elem *d)
|
||||
{
|
||||
next->ipcmp = d->ipcmp;
|
||||
}
|
||||
|
@ -133,16 +133,16 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
|
|||
|
||||
static int
|
||||
hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
||||
const struct xt_action_param *par,
|
||||
enum ipset_adt adt, struct ip_set_adt_opt *opt)
|
||||
const struct xt_action_param *par,
|
||||
enum ipset_adt adt, struct ip_set_adt_opt *opt)
|
||||
{
|
||||
const struct hash_netnet *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netnet4_elem e = { };
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
|
||||
e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
|
||||
e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
|
||||
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
|
||||
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
|
||||
if (adt == IPSET_TEST)
|
||||
e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
|
||||
|
||||
|
@ -156,31 +156,23 @@ hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
|
||||
static int
|
||||
hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
||||
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
|
||||
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
|
||||
{
|
||||
const struct hash_netnet *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netnet4_elem e = { };
|
||||
struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
|
||||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
u32 ip = 0, ip_to = 0, last;
|
||||
u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
|
||||
u8 cidr, cidr2;
|
||||
int ret;
|
||||
|
||||
e.cidr[0] = e.cidr[1] = HOST_MASK;
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
|
||||
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -194,21 +186,20 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
return ret;
|
||||
|
||||
if (tb[IPSET_ATTR_CIDR]) {
|
||||
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
if (!cidr || cidr > HOST_MASK)
|
||||
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
e.cidr[0] = cidr;
|
||||
}
|
||||
|
||||
if (tb[IPSET_ATTR_CIDR2]) {
|
||||
cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
|
||||
if (!cidr2 || cidr2 > HOST_MASK)
|
||||
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
|
||||
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
e.cidr[1] = cidr2;
|
||||
}
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
flags |= (IPSET_FLAG_NOMATCH << 16);
|
||||
}
|
||||
|
@ -231,8 +222,9 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
swap(ip, ip_to);
|
||||
if (unlikely(ip + UINT_MAX == ip_to))
|
||||
return -IPSET_ERR_HASH_RANGE;
|
||||
} else
|
||||
} else {
|
||||
ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
|
||||
}
|
||||
|
||||
ip2_to = ip2_from;
|
||||
if (tb[IPSET_ATTR_IP2_TO]) {
|
||||
|
@ -243,28 +235,27 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
swap(ip2_from, ip2_to);
|
||||
if (unlikely(ip2_from + UINT_MAX == ip2_to))
|
||||
return -IPSET_ERR_HASH_RANGE;
|
||||
} else
|
||||
} else {
|
||||
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
|
||||
}
|
||||
|
||||
if (retried)
|
||||
ip = ntohl(h->next.ip[0]);
|
||||
|
||||
while (!after(ip, ip_to)) {
|
||||
e.ip[0] = htonl(ip);
|
||||
last = ip_set_range_to_cidr(ip, ip_to, &cidr);
|
||||
e.cidr[0] = cidr;
|
||||
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
|
||||
ip2 = (retried &&
|
||||
ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
|
||||
: ip2_from;
|
||||
while (!after(ip2, ip2_to)) {
|
||||
e.ip[1] = htonl(ip2);
|
||||
last2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr2);
|
||||
e.cidr[1] = cidr2;
|
||||
last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
|
||||
ret = adtfn(set, &e, &ext, &ext, flags);
|
||||
if (ret && !ip_set_eexist(ret, flags))
|
||||
return ret;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
ret = 0;
|
||||
ip2 = last2 + 1;
|
||||
}
|
||||
ip = last + 1;
|
||||
|
@ -288,8 +279,8 @@ struct hash_netnet6_elem {
|
|||
|
||||
static inline bool
|
||||
hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
|
||||
const struct hash_netnet6_elem *ip2,
|
||||
u32 *multi)
|
||||
const struct hash_netnet6_elem *ip2,
|
||||
u32 *multi)
|
||||
{
|
||||
return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
|
||||
ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
|
||||
|
@ -316,7 +307,7 @@ hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
|
|||
|
||||
static inline void
|
||||
hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
|
||||
struct hash_netnet6_elem *orig)
|
||||
struct hash_netnet6_elem *orig)
|
||||
{
|
||||
elem->ip[1] = orig->ip[1];
|
||||
}
|
||||
|
@ -335,7 +326,7 @@ hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
|
|||
|
||||
static bool
|
||||
hash_netnet6_data_list(struct sk_buff *skb,
|
||||
const struct hash_netnet6_elem *data)
|
||||
const struct hash_netnet6_elem *data)
|
||||
{
|
||||
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
|
||||
|
||||
|
@ -354,7 +345,7 @@ nla_put_failure:
|
|||
|
||||
static inline void
|
||||
hash_netnet6_data_next(struct hash_netnet4_elem *next,
|
||||
const struct hash_netnet6_elem *d)
|
||||
const struct hash_netnet6_elem *d)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -368,18 +359,18 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
|
|||
|
||||
static int
|
||||
hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
||||
const struct xt_action_param *par,
|
||||
enum ipset_adt adt, struct ip_set_adt_opt *opt)
|
||||
const struct xt_action_param *par,
|
||||
enum ipset_adt adt, struct ip_set_adt_opt *opt)
|
||||
{
|
||||
const struct hash_netnet *h = set->data;
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netnet6_elem e = { };
|
||||
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
||||
|
||||
e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
|
||||
e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
|
||||
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
|
||||
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
|
||||
if (adt == IPSET_TEST)
|
||||
e.ccmp = (HOST_MASK << (sizeof(u8)*8)) | HOST_MASK;
|
||||
e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
|
||||
|
||||
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
|
||||
ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
|
||||
|
@ -391,29 +382,22 @@ hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|||
|
||||
static int
|
||||
hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
|
||||
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
|
||||
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
|
||||
{
|
||||
ipset_adtfn adtfn = set->variant->adt[adt];
|
||||
struct hash_netnet6_elem e = { };
|
||||
struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
|
||||
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
|
||||
int ret;
|
||||
|
||||
e.cidr[0] = e.cidr[1] = HOST_MASK;
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
|
||||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
|
||||
return -IPSET_ERR_PROTOCOL;
|
||||
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
|
||||
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
|
||||
|
||||
if (tb[IPSET_ATTR_LINENO])
|
||||
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
|
||||
|
||||
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -426,21 +410,24 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (tb[IPSET_ATTR_CIDR])
|
||||
if (tb[IPSET_ATTR_CIDR]) {
|
||||
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
|
||||
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
if (tb[IPSET_ATTR_CIDR2])
|
||||
if (tb[IPSET_ATTR_CIDR2]) {
|
||||
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
|
||||
|
||||
if (!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
|
||||
e.cidr[1] > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
|
||||
return -IPSET_ERR_INVALID_CIDR;
|
||||
}
|
||||
|
||||
ip6_netmask(&e.ip[0], e.cidr[0]);
|
||||
ip6_netmask(&e.ip[1], e.cidr[1]);
|
||||
|
||||
if (tb[IPSET_ATTR_CADT_FLAGS]) {
|
||||
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
|
||||
|
||||
if (cadt_flags & IPSET_FLAG_NOMATCH)
|
||||
flags |= (IPSET_FLAG_NOMATCH << 16);
|
||||
}
|
||||
|
@ -497,6 +484,7 @@ hash_netnet_init(void)
|
|||
static void __exit
|
||||
hash_netnet_fini(void)
|
||||
{
|
||||
rcu_barrier();
|
||||
ip_set_type_unregister(&hash_netnet_type);
|
||||
}
|
||||
|
||||
|
|
|
@@ -136,7 +136,7 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = {
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

@@ -166,21 +166,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
u8 cidr;
int ret;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;

@@ -204,8 +198,9 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],

if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
} else {
return -IPSET_ERR_MISSING_PROTO;
}

if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;

@@ -214,6 +209,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],

if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}

@@ -239,8 +235,9 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip, ip_to);
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
} else
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
}

if (retried)
ip = ntohl(h->next.ip);

@@ -256,8 +253,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],

if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;

ret = 0;
}
ip = last + 1;
}

@@ -354,7 +351,7 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem e = {
.cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
.cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

@@ -384,23 +381,17 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
u8 cidr;
int ret;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;

@@ -425,14 +416,16 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],

if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
} else {
return -IPSET_ERR_MISSING_PROTO;
}

if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;

if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}

@@ -456,8 +449,8 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],

if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;

ret = 0;
}
return ret;
}

@@ -510,6 +503,7 @@ hash_netport_init(void)
static void __exit
hash_netport_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_netport_type);
}
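In the hash_netport uadt paths above, the per-element nomatch option travels in the upper 16 bits of the same u32 that carries the command flags, which is why the code ors in IPSET_FLAG_NOMATCH << 16 rather than the flag itself. A small stand-alone sketch of that packing, using made-up MY_FLAG_* values instead of the real ipset constants:

#include <stdint.h>
#include <stdio.h>

#define MY_FLAG_EXIST   0x0001U	/* command flag, lower 16 bits */
#define MY_FLAG_NOMATCH 0x0004U	/* element (cadt) flag, upper 16 bits */

int main(void)
{
	uint32_t flags = MY_FLAG_EXIST;

	/* Element options are shifted into the upper half of the word. */
	flags |= MY_FLAG_NOMATCH << 16;

	/* Consumers split the word back into its two halves. */
	uint16_t cmd_flags  = flags & 0xffff;
	uint16_t cadt_flags = flags >> 16;

	printf("cmd=0x%x cadt=0x%x\n", cmd_flags, cadt_flags);
	return 0;
}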
@@ -62,8 +62,8 @@ struct hash_netportnet4_elem {

static inline bool
hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
const struct hash_netportnet4_elem *ip2,
u32 *multi)
const struct hash_netportnet4_elem *ip2,
u32 *multi)
{
return ip1->ipcmp == ip2->ipcmp &&
ip1->ccmp == ip2->ccmp &&

@@ -91,7 +91,7 @@ hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)

static inline void
hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
struct hash_netportnet4_elem *orig)
struct hash_netportnet4_elem *orig)
{
elem->ip[1] = orig->ip[1];
}

@@ -111,7 +111,7 @@ hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,

static bool
hash_netportnet4_data_list(struct sk_buff *skb,
const struct hash_netportnet4_elem *data)
const struct hash_netportnet4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;

@@ -132,7 +132,7 @@ nla_put_failure:

static inline void
hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
const struct hash_netportnet4_elem *d)
const struct hash_netportnet4_elem *d)
{
next->ipcmp = d->ipcmp;
next->port = d->port;

@@ -144,16 +144,16 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,

static int
hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;

@@ -171,34 +171,26 @@ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,

static int
hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
bool with_ports = false;
u8 cidr, cidr2;
int ret;

e.cidr[0] = e.cidr[1] = HOST_MASK;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;

ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
if (ret)
return ret;

@@ -212,17 +204,15 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;

if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr[0] = cidr;
}

if (tb[IPSET_ATTR_CIDR2]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK)
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr[1] = cidr;
}

e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);

@@ -233,14 +223,16 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],

if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
} else {
return -IPSET_ERR_MISSING_PROTO;
}

if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;

if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}

@@ -264,8 +256,9 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip, ip_to);
if (unlikely(ip + UINT_MAX == ip_to))
return -IPSET_ERR_HASH_RANGE;
} else
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
}

port_to = port = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) {

@@ -283,16 +276,16 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip2_from, ip2_to);
if (unlikely(ip2_from + UINT_MAX == ip2_to))
return -IPSET_ERR_HASH_RANGE;
} else
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}

if (retried)
ip = ntohl(h->next.ip[0]);

while (!after(ip, ip_to)) {
e.ip[0] = htonl(ip);
ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
e.cidr[0] = cidr;
ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++) {

@@ -303,13 +296,12 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
while (!after(ip2, ip2_to)) {
e.ip[1] = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&cidr2);
e.cidr[1] = cidr2;
&e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;

ret = 0;
ip2 = ip2_last + 1;
}
}

@@ -336,8 +328,8 @@ struct hash_netportnet6_elem {

static inline bool
hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
const struct hash_netportnet6_elem *ip2,
u32 *multi)
const struct hash_netportnet6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&

@@ -366,7 +358,7 @@ hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)

static inline void
hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
struct hash_netportnet6_elem *orig)
struct hash_netportnet6_elem *orig)
{
elem->ip[1] = orig->ip[1];
}

@@ -386,7 +378,7 @@ hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,

static bool
hash_netportnet6_data_list(struct sk_buff *skb,
const struct hash_netportnet6_elem *data)
const struct hash_netportnet6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;

@@ -407,7 +399,7 @@ nla_put_failure:

static inline void
hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
const struct hash_netportnet6_elem *d)
const struct hash_netportnet6_elem *d)
{
next->port = d->port;
}

@@ -422,16 +414,16 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,

static int
hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;

@@ -449,34 +441,27 @@ hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,

static int
hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { };
struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;

e.cidr[0] = e.cidr[1] = HOST_MASK;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
if (ret)
return ret;

@@ -489,15 +474,17 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;

if (tb[IPSET_ATTR_CIDR])
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}

if (tb[IPSET_ATTR_CIDR2])
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);

if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
e.cidr[1] > HOST_MASK))
return -IPSET_ERR_INVALID_CIDR;
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}

ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);

@@ -510,14 +497,16 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],

if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
} else {
return -IPSET_ERR_MISSING_PROTO;
}

if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;

if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}

@@ -541,8 +530,8 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],

if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;

ret = 0;
}
return ret;
}

@@ -598,6 +587,7 @@ hash_netportnet_init(void)
static void __exit
hash_netportnet_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&hash_netportnet_type);
}
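hash_netportnet keeps its two prefix lengths next to each other so that, as in the kadt functions above, a test lookup can build e.ccmp once and data_equal can compare both cidr values with a single 16-bit compare. A stand-alone sketch of that union trick (the cidr/ccmp field names are assumed from how the element is used here, not copied from the header):

#include <stdint.h>
#include <stdio.h>

#define HOST_MASK 32	/* IPv4 host prefix length */

union cidr_cmp {
	uint8_t cidr[2];	/* the two prefix lengths */
	uint16_t ccmp;		/* both of them, compared as one word */
};

int main(void)
{
	union cidr_cmp a = { .cidr = { 24, 32 } };
	union cidr_cmp b = { .cidr = { 24, 32 } };

	/* One 16-bit compare instead of two 8-bit ones. */
	printf("equal: %d\n", a.ccmp == b.ccmp);

	/* What the IPSET_TEST branch builds: both halves set to HOST_MASK. */
	a.ccmp = (HOST_MASK << (sizeof(a.cidr[0]) * 8)) | HOST_MASK;
	printf("cidr[0]=%u cidr[1]=%u\n", a.cidr[0], a.cidr[1]);
	return 0;
}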
@@ -9,6 +9,7 @@

#include <linux/module.h>
#include <linux/ip.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

@@ -27,6 +28,8 @@ MODULE_ALIAS("ip_set_list:set");

/* Member elements */
struct set_elem {
struct rcu_head rcu;
struct list_head list;
ip_set_id_t id;
};

@@ -41,12 +44,9 @@ struct list_set {
u32 size; /* size of set list array */
struct timer_list gc; /* garbage collection */
struct net *net; /* namespace */
struct set_elem members[0]; /* the set members */
struct list_head members; /* the set members */
};

#define list_set_elem(set, map, id) \
(struct set_elem *)((void *)(map)->members + (id) * (set)->dsize)

static int
list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,

@@ -54,17 +54,14 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
{
struct list_set *map = set->data;
struct set_elem *e;
u32 i, cmdflags = opt->cmdflags;
u32 cmdflags = opt->cmdflags;
int ret;

/* Don't lookup sub-counters at all */
opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;

@@ -91,13 +88,9 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
{
struct list_set *map = set->data;
struct set_elem *e;
u32 i;
int ret;

for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;

@@ -115,13 +108,9 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
{
struct list_set *map = set->data;
struct set_elem *e;
u32 i;
int ret;

for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;

@@ -138,110 +127,65 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
int ret = -EINVAL;

rcu_read_lock();
switch (adt) {
case IPSET_TEST:
return list_set_ktest(set, skb, par, opt, &ext);
ret = list_set_ktest(set, skb, par, opt, &ext);
break;
case IPSET_ADD:
return list_set_kadd(set, skb, par, opt, &ext);
ret = list_set_kadd(set, skb, par, opt, &ext);
break;
case IPSET_DEL:
return list_set_kdel(set, skb, par, opt, &ext);
ret = list_set_kdel(set, skb, par, opt, &ext);
break;
default:
break;
}
return -EINVAL;
rcu_read_unlock();

return ret;
}

static bool
id_eq(const struct ip_set *set, u32 i, ip_set_id_t id)
{
const struct list_set *map = set->data;
const struct set_elem *e;
/* Userspace interfaces: we are protected by the nfnl mutex */

if (i >= map->size)
return 0;

e = list_set_elem(set, map, i);
return !!(e->id == id &&
!(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set))));
}

static int
list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
const struct ip_set_ext *ext)
static void
__list_set_del(struct ip_set *set, struct set_elem *e)
{
struct list_set *map = set->data;
struct set_elem *e = list_set_elem(set, map, i);

if (e->id != IPSET_INVALID_ID) {
if (i == map->size - 1) {
/* Last element replaced: e.g. add new,before,last */
ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
} else {
struct set_elem *x = list_set_elem(set, map,
map->size - 1);

/* Last element pushed off */
if (x->id != IPSET_INVALID_ID) {
ip_set_put_byindex(map->net, x->id);
ip_set_ext_destroy(set, x);
}
memmove(list_set_elem(set, map, i + 1), e,
set->dsize * (map->size - (i + 1)));
/* Extensions must be initialized to zero */
memset(e, 0, set->dsize);
}
}

e->id = d->id;
if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(e, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
return 0;
}

static int
list_set_del(struct ip_set *set, u32 i)
{
struct list_set *map = set->data;
struct set_elem *e = list_set_elem(set, map, i);

ip_set_put_byindex(map->net, e->id);
/* We may call it, because we don't have a to be destroyed
* extension which is used by the kernel.
*/
ip_set_ext_destroy(set, e);
kfree_rcu(e, rcu);
}

if (i < map->size - 1)
memmove(e, list_set_elem(set, map, i + 1),
set->dsize * (map->size - (i + 1)));
static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
list_del_rcu(&e->list);
__list_set_del(set, e);
}

/* Last element */
e = list_set_elem(set, map, map->size - 1);
e->id = IPSET_INVALID_ID;
return 0;
static inline void
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
list_replace_rcu(&old->list, &e->list);
__list_set_del(set, old);
}

static void
set_cleanup_entries(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e;
u32 i = 0;
struct set_elem *e, *n;

while (i < map->size) {
e = list_set_elem(set, map, i);
if (e->id != IPSET_INVALID_ID &&
ip_set_timeout_expired(ext_timeout(e, set)))
list_set_del(set, i);
/* Check element moved to position i in next loop */
else
i++;
}
list_for_each_entry_safe(e, n, &map->members, list)
if (ip_set_timeout_expired(ext_timeout(e, set)))
list_set_del(set, e);
}

static int

@@ -250,31 +194,46 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e;
u32 i;
struct set_elem *e, *next, *prev = NULL;
int ret;

for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
else if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (e->id != d->id)
else if (e->id != d->id) {
prev = e;
continue;
}

if (d->before == 0)
return 1;
else if (d->before > 0)
ret = id_eq(set, i + 1, d->refid);
else
ret = i > 0 && id_eq(set, i - 1, d->refid);
if (d->before == 0) {
ret = 1;
} else if (d->before > 0) {
next = list_next_entry(e, list);
ret = !list_is_last(&e->list, &map->members) &&
next->id == d->refid;
} else {
ret = prev && prev->id == d->refid;
}
return ret;
}
return 0;
}

static void
list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext,
struct set_elem *e)
{
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(e, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
/* Update timeout last */
if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
}

static int
list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,

@@ -282,60 +241,78 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e;
struct set_elem *e, *n, *prev, *next;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 i, ret = 0;

if (SET_WITH_TIMEOUT(set))
set_cleanup_entries(set);

/* Check already added element */
for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
goto insert;
else if (e->id != d->id)
/* Find where to add the new entry */
n = prev = next = NULL;
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;

if ((d->before > 1 && !id_eq(set, i + 1, d->refid)) ||
(d->before < 0 &&
(i == 0 || !id_eq(set, i - 1, d->refid))))
/* Before/after doesn't match */
else if (d->id == e->id)
n = e;
else if (d->before == 0 || e->id != d->refid)
continue;
else if (d->before > 0)
next = e;
else
prev = e;
}
/* Re-add already existing element */
if (n) {
if ((d->before > 0 && !next) ||
(d->before < 0 && !prev))
return -IPSET_ERR_REF_EXIST;
if (!flag_exist)
/* Can't re-add */
return -IPSET_ERR_EXIST;
/* Update extensions */
ip_set_ext_destroy(set, e);
ip_set_ext_destroy(set, n);
list_set_init_extensions(set, ext, n);

if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(e, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
/* Set is already added to the list */
ip_set_put_byindex(map->net, d->id);
return 0;
}
insert:
ret = -IPSET_ERR_LIST_FULL;
for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
: list_set_add(set, i, d, ext);
else if (e->id != d->refid)
continue;
else if (d->before > 0)
ret = list_set_add(set, i, d, ext);
else if (i + 1 < map->size)
ret = list_set_add(set, i + 1, d, ext);
/* Add new entry */
if (d->before == 0) {
/* Append */
n = list_empty(&map->members) ? NULL :
list_last_entry(&map->members, struct set_elem, list);
} else if (d->before > 0) {
/* Insert after next element */
if (!list_is_last(&next->list, &map->members))
n = list_next_entry(next, list);
} else {
/* Insert before prev element */
if (prev->list.prev != &map->members)
n = list_prev_entry(prev, list);
}
/* Can we replace a timed out entry? */
if (n &&
!(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(n, set))))
n = NULL;

return ret;
e = kzalloc(set->dsize, GFP_KERNEL);
if (!e)
return -ENOMEM;
e->id = d->id;
INIT_LIST_HEAD(&e->list);
list_set_init_extensions(set, ext, e);
if (n)
list_set_replace(set, e, n);
else if (next)
list_add_tail_rcu(&e->list, &next->list);
else if (prev)
list_add_rcu(&e->list, &prev->list);
else
list_add_tail_rcu(&e->list, &map->members);

return 0;
}

static int

@@ -344,32 +321,30 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e;
u32 i;
struct set_elem *e, *next, *prev = NULL;

for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return d->before != 0 ? -IPSET_ERR_REF_EXIST
: -IPSET_ERR_EXIST;
else if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
list_for_each_entry(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (e->id != d->id)
else if (e->id != d->id) {
prev = e;
continue;
}

if (d->before == 0)
return list_set_del(set, i);
else if (d->before > 0) {
if (!id_eq(set, i + 1, d->refid))
if (d->before > 0) {
next = list_next_entry(e, list);
if (list_is_last(&e->list, &map->members) ||
next->id != d->refid)
return -IPSET_ERR_REF_EXIST;
return list_set_del(set, i);
} else if (i == 0 || !id_eq(set, i - 1, d->refid))
return -IPSET_ERR_REF_EXIST;
else
return list_set_del(set, i);
} else if (d->before < 0) {
if (!prev || prev->id != d->refid)
return -IPSET_ERR_REF_EXIST;
}
list_set_del(set, e);
return 0;
}
return -IPSET_ERR_EXIST;
return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST;
}

static int

@@ -383,19 +358,13 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
struct ip_set *s;
int ret = 0;

if (unlikely(!tb[IPSET_ATTR_NAME] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;

if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

if (unlikely(!tb[IPSET_ATTR_NAME] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;

ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;

@@ -410,6 +379,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],

if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

e.before = f & IPSET_FLAG_BEFORE;
}

@@ -447,27 +417,26 @@ static void
list_set_flush(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e;
u32 i;
struct set_elem *e, *n;

for (i = 0; i < map->size; i++) {
e = list_set_elem(set, map, i);
if (e->id != IPSET_INVALID_ID) {
ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
e->id = IPSET_INVALID_ID;
}
}
list_for_each_entry_safe(e, n, &map->members, list)
list_set_del(set, e);
}

static void
list_set_destroy(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e, *n;

if (SET_WITH_TIMEOUT(set))
del_timer_sync(&map->gc);
list_set_flush(set);
list_for_each_entry_safe(e, n, &map->members, list) {
list_del(&e->list);
ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
kfree(e);
}
kfree(map);

set->data = NULL;

@@ -478,6 +447,11 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
{
const struct list_set *map = set->data;
struct nlattr *nested;
struct set_elem *e;
u32 n = 0;

list_for_each_entry(e, &map->members, list)
n++;

nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)

@@ -485,7 +459,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->size * set->dsize)))
htonl(sizeof(*map) + n * set->dsize)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;

@@ -502,18 +476,22 @@ list_set_list(const struct ip_set *set,
{
const struct list_set *map = set->data;
struct nlattr *atd, *nested;
u32 i, first = cb->args[IPSET_CB_ARG0];
const struct set_elem *e;
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
struct set_elem *e;
int ret = 0;

atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[IPSET_CB_ARG0] < map->size;
cb->args[IPSET_CB_ARG0]++) {
i = cb->args[IPSET_CB_ARG0];
e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
goto finish;
list_for_each_entry(e, &map->members, list) {
if (i == first)
break;
i++;
}

rcu_read_lock();
list_for_each_entry_from(e, &map->members, list) {
i++;
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;

@@ -521,9 +499,10 @@ list_set_list(const struct ip_set *set,
if (!nested) {
if (i == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
ret = -EMSGSIZE;
goto out;
}
goto nla_put_failure;
}
if (nla_put_string(skb, IPSET_ATTR_NAME,
ip_set_name_byindex(map->net, e->id)))

@@ -532,20 +511,23 @@ list_set_list(const struct ip_set *set,
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
finish:

ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[IPSET_CB_ARG0] = 0;
return 0;
goto out;

nla_put_failure:
nla_nest_cancel(skb, nested);
if (unlikely(i == first)) {
cb->args[IPSET_CB_ARG0] = 0;
return -EMSGSIZE;
ret = -EMSGSIZE;
}
cb->args[IPSET_CB_ARG0] = i - 1;
ipset_nest_end(skb, atd);
return 0;
out:
rcu_read_unlock();
return ret;
}

static bool

@@ -577,12 +559,12 @@ static const struct ip_set_type_variant set_variant = {
static void
list_set_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct ip_set *set = (struct ip_set *)ul_set;
struct list_set *map = set->data;

write_lock_bh(&set->lock);
spin_lock_bh(&set->lock);
set_cleanup_entries(set);
write_unlock_bh(&set->lock);
spin_unlock_bh(&set->lock);

map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);

@@ -594,7 +576,7 @@ list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
struct list_set *map = set->data;

init_timer(&map->gc);
map->gc.data = (unsigned long) set;
map->gc.data = (unsigned long)set;
map->gc.function = gc;
map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);

@@ -606,24 +588,16 @@ static bool
init_list_set(struct net *net, struct ip_set *set, u32 size)
{
struct list_set *map;
struct set_elem *e;
u32 i;

map = kzalloc(sizeof(*map) +
min_t(u32, size, IP_SET_LIST_MAX_SIZE) * set->dsize,
GFP_KERNEL);
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return false;

map->size = size;
map->net = net;
INIT_LIST_HEAD(&map->members);
set->data = map;

for (i = 0; i < size; i++) {
e = list_set_elem(set, map, i);
e->id = IPSET_INVALID_ID;
}

return true;
}

@@ -696,6 +670,7 @@ list_set_init(void)
static void __exit
list_set_fini(void)
{
rcu_barrier();
ip_set_type_unregister(&list_set_type);
}
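The list:set conversion above replaces the fixed member array with a plain rculist: lookups from the packet path walk the list under rcu_read_lock() with list_for_each_entry_rcu(), the already-serialized userspace add/del paths publish entries with list_add_tail_rcu()/list_add_rcu()/list_replace_rcu(), unlink them with list_del_rcu(), and hand the memory to kfree_rcu() so late readers never touch freed entries. A condensed stand-alone sketch of that pattern with hypothetical item/registry names (not the ipset code):

#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct rcu_head rcu;
	struct list_head list;
	int id;
};

static LIST_HEAD(registry);
static DEFINE_SPINLOCK(registry_lock);	/* serializes writers only */

/* Reader side: lockless, safe against concurrent add/del. */
static bool item_exists(int id)
{
	struct item *it;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &registry, list) {
		if (it->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Writer side: publish a new entry at the tail. */
static int item_add(int id)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;
	it->id = id;
	spin_lock(&registry_lock);
	list_add_tail_rcu(&it->list, &registry);
	spin_unlock(&registry_lock);
	return 0;
}

/* Writer side: unlink, then let RCU free it after readers drain. */
static void item_del(struct item *it)
{
	spin_lock(&registry_lock);
	list_del_rcu(&it->list);
	spin_unlock(&registry_lock);
	kfree_rcu(it, rcu);
}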
@@ -1,9 +1,7 @@
#include <linux/export.h>
#include <linux/netfilter/ipset/pfxlen.h>

/*
* Prefixlen maps for fast conversions, by Jan Engelhardt.
*/
/* Prefixlen maps for fast conversions, by Jan Engelhardt. */

#define E(a, b, c, d) \
{.ip6 = { \

@@ -11,8 +9,7 @@
htonl(c), htonl(d), \
} }

/*
* This table works for both IPv4 and IPv6;
/* This table works for both IPv4 and IPv6;
* just use prefixlen_netmask_map[prefixlength].ip.
*/
const union nf_inet_addr ip_set_netmask_map[] = {

@@ -149,13 +146,12 @@ const union nf_inet_addr ip_set_netmask_map[] = {
EXPORT_SYMBOL_GPL(ip_set_netmask_map);

#undef E
#define E(a, b, c, d) \
{.ip6 = { (__force __be32) a, (__force __be32) b, \
(__force __be32) c, (__force __be32) d, \
#define E(a, b, c, d) \
{.ip6 = { (__force __be32)a, (__force __be32)b, \
(__force __be32)c, (__force __be32)d, \
} }

/*
* This table works for both IPv4 and IPv6;
/* This table works for both IPv4 and IPv6;
* just use prefixlen_hostmask_map[prefixlength].ip.
*/
const union nf_inet_addr ip_set_hostmask_map[] = {
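The two E() tables in pfxlen.c let callers turn a prefix length into a ready-made netmask or hostmask with a single array lookup. A sketch of the equivalent on-the-fly computation for IPv4, shown only to make the tables' contents concrete (plain C, not the kernel interface):

#include <stdint.h>
#include <stdio.h>

/* Host-order IPv4 netmask for a prefix length 0..32. */
static uint32_t prefix_to_mask(unsigned int cidr)
{
	/* Guard cidr == 0: a shift by 32 would be undefined behaviour. */
	return cidr ? ~0U << (32 - cidr) : 0;
}

int main(void)
{
	for (unsigned int cidr = 0; cidr <= 32; cidr += 8)
		printf("/%-2u -> 0x%08x\n", cidr, prefix_to_mask(cidr));
	return 0;
}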
@@ -9,7 +9,8 @@
*/

/* Kernel module which implements the set match and SET target
* for netfilter/iptables. */
* for netfilter/iptables.
*/

#include <linux/module.h>
#include <linux/skbuff.h>

@@ -53,6 +54,7 @@ static bool
set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v0 *info = par->matchinfo;

ADT_OPT(opt, par->family, info->match_set.u.compat.dim,
info->match_set.u.compat.flags, 0, UINT_MAX);

@@ -69,10 +71,10 @@ compat_flags(struct xt_set_info_v0 *info)
info->u.compat.dim = IPSET_DIM_ZERO;
if (info->u.flags[0] & IPSET_MATCH_INV)
info->u.compat.flags |= IPSET_INV_MATCH;
for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
for (i = 0; i < IPSET_DIM_MAX - 1 && info->u.flags[i]; i++) {
info->u.compat.dim++;
if (info->u.flags[i] & IPSET_SRC)
info->u.compat.flags |= (1<<info->u.compat.dim);
info->u.compat.flags |= (1 << info->u.compat.dim);
}
}

@@ -89,7 +91,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
info->match_set.index);
return -ENOENT;
}
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
pr_warn("Protocol error: set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;

@@ -115,6 +117,7 @@ static bool
set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v1 *info = par->matchinfo;

ADT_OPT(opt, par->family, info->match_set.dim,
info->match_set.flags, 0, UINT_MAX);

@@ -179,9 +182,10 @@ static bool
set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v3 *info = par->matchinfo;
int ret;

ADT_OPT(opt, par->family, info->match_set.dim,
info->match_set.flags, info->flags, UINT_MAX);
int ret;

if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)

@@ -225,9 +229,10 @@ static bool
set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v4 *info = par->matchinfo;
int ret;

ADT_OPT(opt, par->family, info->match_set.dim,
info->match_set.flags, info->flags, UINT_MAX);
int ret;

if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)

@@ -253,6 +258,7 @@ static unsigned int
set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v0 *info = par->targinfo;

ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim,
info->add_set.u.compat.flags, 0, UINT_MAX);
ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim,

@@ -291,8 +297,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
return -ENOENT;
}
}
if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
pr_warn("Protocol error: SET target dimension is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);

@@ -325,6 +331,7 @@ static unsigned int
set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v1 *info = par->targinfo;

ADT_OPT(add_opt, par->family, info->add_set.dim,
info->add_set.flags, 0, UINT_MAX);
ADT_OPT(del_opt, par->family, info->del_set.dim,

@@ -393,6 +400,7 @@ static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v2 *info = par->targinfo;

ADT_OPT(add_opt, par->family, info->add_set.dim,
info->add_set.flags, info->flags, info->timeout);
ADT_OPT(del_opt, par->family, info->del_set.dim,

@@ -400,8 +408,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)

/* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)

@@ -419,6 +427,8 @@ static unsigned int
set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v3 *info = par->targinfo;
int ret;

ADT_OPT(add_opt, par->family, info->add_set.dim,
info->add_set.flags, info->flags, info->timeout);
ADT_OPT(del_opt, par->family, info->del_set.dim,

@@ -426,12 +436,10 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
ADT_OPT(map_opt, par->family, info->map_set.dim,
info->map_set.flags, 0, UINT_MAX);

int ret;

/* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)

@@ -457,7 +465,6 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}


static int
set_target_v3_checkentry(const struct xt_tgchk_param *par)
{

@@ -497,8 +504,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
!(par->hook_mask & (1 << NF_INET_FORWARD |
1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) {
pr_warn("mapping of prio or/and queue is allowed only"
"from OUTPUT/FORWARD/POSTROUTING chains\n");
pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
return -EINVAL;
}
index = ip_set_nfnl_get_byindex(par->net,

@@ -519,8 +525,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.dim > IPSET_DIM_MAX ||
info->map_set.dim > IPSET_DIM_MAX) {
pr_warn("Protocol error: SET target dimension "
"is over the limit!\n");
pr_warn("Protocol error: SET target dimension is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)

@@ -546,7 +551,6 @@ set_target_v3_destroy(const struct xt_tgdtor_param *par)
ip_set_nfnl_put(par->net, info->map_set.index);
}


static struct xt_match set_matches[] __read_mostly = {
{
.name = "set",
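The UINT_MAX / MSEC_PER_SEC clamp reformatted above exists because the per-element timeout arrives in seconds and is later multiplied by MSEC_PER_SEC on its way to jiffies, so any larger value would overflow the 32-bit timeout field. A stand-alone sketch of the clamp, with an assumed NO_TIMEOUT sentinel standing in for IPSET_NO_TIMEOUT:

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000U
#define NO_TIMEOUT   UINT32_MAX	/* sentinel: never expire */

/* Clamp a timeout in seconds so that timeout * MSEC_PER_SEC fits in a u32. */
static uint32_t clamp_timeout(uint32_t timeout)
{
	if (timeout != NO_TIMEOUT && timeout > UINT32_MAX / MSEC_PER_SEC)
		timeout = UINT32_MAX / MSEC_PER_SEC;
	return timeout;
}

int main(void)
{
	printf("%u\n", clamp_timeout(10));		/* unchanged */
	printf("%u\n", clamp_timeout(5000000));		/* clamped to 4294967 */
	return 0;
}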