bpf: add helper for copying attrs to struct bpf_map
All map types reimplement the field-by-field copy of union bpf_attr members into struct bpf_map.  Add a helper to perform this operation.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
commit bd475643d7
parent 9328e0d1bc
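Before the diff, a minimal sketch of what a map type's ->map_alloc() callback looks like once it uses the new helper. Everything here except bpf_map_init_from_attr(), union bpf_attr and struct bpf_map is hypothetical and only for illustration; the "example_map" type does not exist in the tree:

        /* hypothetical map type embedding the generic struct bpf_map */
        struct example_map {
                struct bpf_map map;
                /* per-type state would follow here */
        };

        static struct bpf_map *example_map_alloc(union bpf_attr *attr)
        {
                struct example_map *emap;

                emap = kzalloc(sizeof(*emap), GFP_USER);
                if (!emap)
                        return ERR_PTR(-ENOMEM);

                /* one call replaces the six field-by-field attr copies:
                 * map_type, key_size, value_size, max_entries, map_flags
                 * and numa_node are all taken from *attr
                 */
                bpf_map_init_from_attr(&emap->map, attr);

                return &emap->map;
        }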
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -378,6 +378,7 @@ void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	if (!cmap)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	cmap->map.map_type = attr->map_type;
-	cmap->map.key_size = attr->key_size;
-	cmap->map.value_size = attr->value_size;
-	cmap->map.max_entries = attr->max_entries;
-	cmap->map.map_flags = attr->map_flags;
-	cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&cmap->map, attr);
 
 	/* Pre-limit array size based on NR_CPUS, not final CPU check */
 	if (cmap->map.max_entries > NR_CPUS) {
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (!dtab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	dtab->map.map_type = attr->map_type;
-	dtab->map.key_size = attr->key_size;
-	dtab->map.value_size = attr->value_size;
-	dtab->map.max_entries = attr->max_entries;
-	dtab->map.map_flags = attr->map_flags;
-	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&dtab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -304,7 +304,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	 */
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
-	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_htab *htab;
 	int err, i;
 	u64 cost;
@@ -313,13 +312,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	htab->map.map_type = attr->map_type;
-	htab->map.key_size = attr->key_size;
-	htab->map.value_size = attr->value_size;
-	htab->map.max_entries = attr->max_entries;
-	htab->map.map_flags = attr->map_flags;
-	htab->map.numa_node = numa_node;
+	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
 		/* ensure each CPU's lru list has >=1 elements.
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	/* copy mandatory map attributes */
-	trie->map.map_type = attr->map_type;
-	trie->map.key_size = attr->key_size;
-	trie->map.value_size = attr->value_size;
-	trie->map.max_entries = attr->max_entries;
-	trie->map.map_flags = attr->map_flags;
-	trie->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&trie->map, attr);
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -513,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	if (!stab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	stab->map.map_type = attr->map_type;
-	stab->map.key_size = attr->key_size;
-	stab->map.value_size = attr->value_size;
-	stab->map.max_entries = attr->max_entries;
-	stab->map.map_flags = attr->map_flags;
-	stab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&stab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_smap;
 
-	smap->map.map_type = attr->map_type;
-	smap->map.key_size = attr->key_size;
+	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
-	smap->map.max_entries = attr->max_entries;
-	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	smap->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = bpf_map_precharge_memlock(smap->map.pages);
 	if (err)
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -143,6 +143,16 @@ void bpf_map_area_free(void *area)
 	kvfree(area);
 }
 
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+	map->map_type = attr->map_type;
+	map->key_size = attr->key_size;
+	map->value_size = attr->value_size;
+	map->max_entries = attr->max_entries;
+	map->map_flags = attr->map_flags;
+	map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();