[NETFILTER]: x_tables: struct xt_table_info diet
Instead of using a big array of NR_CPUS entries, we can compute the size needed at runtime, using nr_cpu_ids. This should save some RAM, especially on David's machines where NR_CPUS=4096: 32 KB can be saved per table, and 64 KB for dynamically allocated ones (because of slab/slub alignments). In particular, the 'bootstrap' tables are no longer static (in the data section) but live on the stack, since their size is now very small. This should also reduce stack usage in the compat functions (get_info() declares an automatic variable that could be bigger than the kernel stack for large NR_CPUS).

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 259d4e41f3
parent d3c5ee6d54
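For readers unfamiliar with the trick the patch relies on: the structure keeps a one-element placeholder array as its last member, and the real object size is computed at runtime from the number of CPUs, exactly what the XT_TABLE_INFO_SZ macro below does with offsetof() and nr_cpu_ids. The following stand-alone userspace sketch uses a simplified stand-in struct and macro (table_info / TABLE_INFO_SZ are illustrative names, not the kernel's) to show the same sizing idea:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	struct table_info {              /* simplified stand-in for xt_table_info */
		unsigned int size;
		unsigned int number;
		/* ... other fixed-size bookkeeping fields ... */
		char *entries[1];        /* MUST be last: sized at runtime */
	};

	#define TABLE_INFO_SZ(ncpus) \
		(offsetof(struct table_info, entries) + (ncpus) * sizeof(char *))

	int main(void)
	{
		unsigned int ncpus = 4;  /* stand-in for nr_cpu_ids */
		struct table_info *info = calloc(1, TABLE_INFO_SZ(ncpus));

		if (!info)
			return 1;
		/* entries[0] .. entries[ncpus-1] are now valid slots */
		printf("allocated %zu bytes instead of a fixed NR_CPUS-sized array\n",
		       TABLE_INFO_SZ(ncpus));
		free(info);
		return 0;
	}

With NR_CPUS=4096 the old fixed array alone costs 32 KB per table on a 64-bit kernel; sized this way, a 4-CPU machine pays for just four pointers.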
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -269,9 +269,12 @@ struct xt_table_info
 	unsigned int underflow[NF_INET_NUMHOOKS];
 
 	/* ipt_entry tables: one per CPU */
-	char *entries[NR_CPUS];
+	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
+	char *entries[1];
 };
 
+#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
+			  + nr_cpu_ids * sizeof(char *))
 extern int xt_register_target(struct xt_target *target);
 extern void xt_unregister_target(struct xt_target *target);
 extern int xt_register_targets(struct xt_target *target, unsigned int n);
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -811,8 +811,7 @@ static int do_replace(void __user *user, unsigned int len)
 		return -ENOPROTOOPT;
 
 	/* overflow check */
-	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
-			SMP_CACHE_BYTES)
+	if (tmp.size >= INT_MAX / num_possible_cpus())
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
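A note on the new bound (a reading of the patch, not an authoritative statement): the ruleset blob is later replicated once per possible CPU, so the total memory needed is roughly tmp.size multiplied by num_possible_cpus(); rejecting any size at or above INT_MAX / num_possible_cpus() keeps that product within an int without the old NR_CPUS / SMP_CACHE_BYTES arithmetic. A minimal stand-alone sketch of the same guard, with size and ncpus standing in for tmp.size and num_possible_cpus():

	#include <limits.h>
	#include <errno.h>

	/* Reject a user-supplied size whose per-CPU replication could exceed INT_MAX. */
	int check_replicated_size(unsigned int size, unsigned int ncpus)
	{
		if (ncpus == 0 || size >= INT_MAX / ncpus)
			return -ENOMEM;	/* size * ncpus could overflow an int */
		return 0;
	}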
@@ -1090,7 +1089,7 @@ int arpt_register_table(struct arpt_table *table,
 {
 	int ret;
 	struct xt_table_info *newinfo;
-	static struct xt_table_info bootstrap
+	struct xt_table_info bootstrap
 		= { 0, 0, 0, { 0 }, { 0 }, { } };
 	void *loc_cpu_entry;
 
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1090,7 +1090,8 @@ compat_calc_match(struct ipt_entry_match *m, int * size)
 	return 0;
 }
 
-static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
+static int compat_calc_entry(struct ipt_entry *e,
+			     const struct xt_table_info *info,
 			     void *base, struct xt_table_info *newinfo)
 {
 	struct ipt_entry_target *t;
@@ -1118,22 +1119,17 @@ static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
 	return 0;
 }
 
-static int compat_table_info(struct xt_table_info *info,
+static int compat_table_info(const struct xt_table_info *info,
 			     struct xt_table_info *newinfo)
 {
 	void *loc_cpu_entry;
-	int i;
 
 	if (!newinfo || !info)
 		return -EINVAL;
 
-	memset(newinfo, 0, sizeof(struct xt_table_info));
-	newinfo->size = info->size;
-	newinfo->number = info->number;
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		newinfo->hook_entry[i] = info->hook_entry[i];
-		newinfo->underflow[i] = info->underflow[i];
-	}
+	/* we dont care about newinfo->entries[] */
+	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
 	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
 				 compat_calc_entry, info, loc_cpu_entry, newinfo);
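The removed field-by-field copy and the new single memcpy do the same job because every bookkeeping field sits before the trailing entries[] array, so offsetof(struct xt_table_info, entries) is exactly the size of the fixed header. A small stand-alone illustration of the idiom, using stand-in names rather than the kernel types:

	#include <stddef.h>
	#include <string.h>

	struct table_hdr {
		unsigned int size;
		unsigned int number;
		unsigned int initial_entries;
		char *entries[1];	/* runtime-sized; must remain the last member */
	};

	/* Copy only the fixed header; the destination's entries[] slots are untouched. */
	void copy_header(struct table_hdr *dst, const struct table_hdr *src)
	{
		memcpy(dst, src, offsetof(struct table_hdr, entries));
	}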
@@ -1327,8 +1323,7 @@ do_replace(void __user *user, unsigned int len)
 		return -ENOPROTOOPT;
 
 	/* overflow check */
-	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
-			SMP_CACHE_BYTES)
+	if (tmp.size >= INT_MAX / num_possible_cpus())
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
@@ -1868,8 +1863,7 @@ compat_do_replace(void __user *user, unsigned int len)
 		return -ENOPROTOOPT;
 
 	/* overflow check */
-	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
-			SMP_CACHE_BYTES)
+	if (tmp.size >= INT_MAX / num_possible_cpus())
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
@@ -2126,7 +2120,7 @@ int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
 {
 	int ret;
 	struct xt_table_info *newinfo;
-	static struct xt_table_info bootstrap
+	struct xt_table_info bootstrap
 		= { 0, 0, 0, { 0 }, { 0 }, { } };
 	void *loc_cpu_entry;
 
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1042,8 +1042,7 @@ do_replace(void __user *user, unsigned int len)
 		return -EFAULT;
 
 	/* overflow check */
-	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
-			SMP_CACHE_BYTES)
+	if (tmp.size >= INT_MAX / num_possible_cpus())
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
@@ -1339,7 +1338,7 @@ int ip6t_register_table(struct xt_table *table,
 {
 	int ret;
 	struct xt_table_info *newinfo;
-	static struct xt_table_info bootstrap
+	struct xt_table_info bootstrap
 		= { 0, 0, 0, { 0 }, { 0 }, { } };
 	void *loc_cpu_entry;
 
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -499,7 +499,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
 		return NULL;
 
-	newinfo = kzalloc(sizeof(struct xt_table_info), GFP_KERNEL);
+	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
 	if (!newinfo)
 		return NULL;
 
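For orientation, here is a hypothetical, heavily simplified sketch of what an allocator along these lines ends up doing after the change: the control structure is sized with XT_TABLE_INFO_SZ so it carries one entries[] slot per possible CPU, and each slot then points at that CPU's copy of the ruleset. The real function's vmalloc fallback and full error unwinding are omitted, and the usual linux/slab.h and CPU-mask helpers are assumed; this is not the kernel code itself.

	/* Hypothetical sketch, not the actual xt_alloc_table_info() implementation. */
	static struct xt_table_info *alloc_table_info_sketch(unsigned int size)
	{
		struct xt_table_info *newinfo;
		int cpu;

		/* fixed header plus nr_cpu_ids pointers, instead of a full NR_CPUS array */
		newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
		if (!newinfo)
			return NULL;

		newinfo->size = size;

		for_each_possible_cpu(cpu) {
			/* one ruleset blob per possible CPU, reachable via the trailing array */
			newinfo->entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							     cpu_to_node(cpu));
			if (!newinfo->entries[cpu]) {
				/* undo the previous per-CPU allocations here (omitted) */
				kfree(newinfo);
				return NULL;
			}
		}
		return newinfo;
	}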