bpf: export bpf_map_inc_not_zero

Rename the existing bpf_map_inc_not_zero to __bpf_map_inc_not_zero to
indicate that it's the caller's responsibility to do proper locking.
Create and export a bpf_map_inc_not_zero wrapper that properly takes
map_idr_lock. It will be used in the next commit to hold a map while
cloning a socket.
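
The wrapper gives code outside syscall.c a safe way to grab a
reference on a map that may be concurrently freed. A minimal sketch of
the intended pattern follows; hold_map_for_clone() is hypothetical,
the real caller only lands in the next commit:

#include <linux/bpf.h>
#include <linux/err.h>

/* Hypothetical caller: pin a map while per-socket storage is copied.
 * bpf_map_inc_not_zero() takes map_idr_lock itself, so the caller
 * must not already hold that lock.
 */
static int hold_map_for_clone(struct bpf_map *map)
{
	map = bpf_map_inc_not_zero(map, false);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* refcount already dropped to zero */

	/* ... copy the storage backed by @map to the child socket ... */

	bpf_map_put(map);		/* release the reference */
	return 0;
}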

Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Yonghong Song <yhs@fb.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
commit b0e4701ce1 (parent fae55527ac)
Author: Stanislav Fomichev, 2019-08-14 10:37:48 -07:00
Committed by: Daniel Borkmann

2 changed files with 15 additions and 3 deletions

include/linux/bpf.h

@@ -647,6 +647,8 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
+						   bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);

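The declaration above follows the inc-not-zero idiom: take a reference
only if the refcount is still observed non-zero, so a map whose
teardown has already started is never resurrected; that is also why
the return value is __must_check. A generic sketch of the idiom, not
the kernel's exact implementation:

/* Atomically: if (*refcnt != 0) (*refcnt)++; return the old value.
 * A zero old value means teardown already began and the object must
 * not be touched.
 */
static bool ref_get_unless_zero(atomic_t *refcnt)
{
	return atomic_fetch_add_unless(refcnt, 1, 0) != 0;
}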
kernel/bpf/syscall.c

@@ -683,8 +683,8 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 }
 
 /* map_idr_lock should have been held */
-static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
-					    bool uref)
+static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
+					      bool uref)
 {
 	int refold;
 
@@ -704,6 +704,16 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
 	return map;
 }
 
+struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
+{
+	spin_lock_bh(&map_idr_lock);
+	map = __bpf_map_inc_not_zero(map, uref);
+	spin_unlock_bh(&map_idr_lock);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
+
 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 {
 	return -ENOTSUPP;
@@ -2177,7 +2187,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	spin_lock_bh(&map_idr_lock);
 	map = idr_find(&map_idr, id);
 	if (map)
-		map = bpf_map_inc_not_zero(map, true);
+		map = __bpf_map_inc_not_zero(map, true);
 	else
 		map = ERR_PTR(-ENOENT);
 	spin_unlock_bh(&map_idr_lock);
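
Note the split in the last hunk: bpf_map_get_fd_by_id() already holds
map_idr_lock around the idr_find(), so it now calls the unlocked
__bpf_map_inc_not_zero() directly; going through the exported wrapper
there would re-take the non-recursive spinlock and deadlock. Condensed,
the two call paths look like this (a sketch mirroring the diff above):

/* External path: the wrapper takes map_idr_lock itself. */
map = bpf_map_inc_not_zero(map, false);

/* Internal path: the lock is already held for the IDR lookup. */
spin_lock_bh(&map_idr_lock);
map = idr_find(&map_idr, id);
if (map)
	map = __bpf_map_inc_not_zero(map, true); /* uref: an fd is handed out */
spin_unlock_bh(&map_idr_lock);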