bpf: Drop always true do_idr_lock parameter to bpf_map_free_id
The do_idr_lock parameter to bpf_map_free_id was introduced by commit
bd5f5f4ecb ("bpf: Add BPF_MAP_GET_FD_BY_ID"). However, all callers set
do_idr_lock = true since commit 1e0bd5a091 ("bpf: Switch bpf_map ref
counter to atomic64_t so bpf_map_inc() never fails").

While at it, also inline __bpf_map_put into its only caller bpf_map_put
now that do_idr_lock can be dropped from its signature.

Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
Link: https://lore.kernel.org/r/20230202141921.4424-1-tklauser@distanz.ch
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 158e5e9eea
parent d9e44c324c
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1846,7 +1846,7 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 
 void bpf_prog_free_id(struct bpf_prog *prog);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map);
 
 struct btf_field *btf_record_find(const struct btf_record *rec,
 				  u32 offset, enum btf_field_type type);
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -136,7 +136,7 @@ static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
 {
 	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
 	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
-	bpf_map_free_id(&offmap->map, true);
+	bpf_map_free_id(&offmap->map);
 	list_del_init(&offmap->offloads);
 	offmap->netdev = NULL;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -390,7 +390,7 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 	return id > 0 ? 0 : id;
 }
 
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map)
 {
 	unsigned long flags;
 
@@ -402,18 +402,12 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 	if (!map->id)
 		return;
 
-	if (do_idr_lock)
-		spin_lock_irqsave(&map_idr_lock, flags);
-	else
-		__acquire(&map_idr_lock);
+	spin_lock_irqsave(&map_idr_lock, flags);
 
 	idr_remove(&map_idr, map->id);
 	map->id = 0;
 
-	if (do_idr_lock)
-		spin_unlock_irqrestore(&map_idr_lock, flags);
-	else
-		__release(&map_idr_lock);
+	spin_unlock_irqrestore(&map_idr_lock, flags);
 }
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -706,13 +700,13 @@ static void bpf_map_put_uref(struct bpf_map *map)
 }
 
 /* decrement map refcnt and schedule it for freeing via workqueue
- * (unrelying map implementation ops->map_free() might sleep)
+ * (underlying map implementation ops->map_free() might sleep)
  */
-static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_put(struct bpf_map *map)
 {
 	if (atomic64_dec_and_test(&map->refcnt)) {
 		/* bpf_map_free_id() must be called first */
-		bpf_map_free_id(map, do_idr_lock);
+		bpf_map_free_id(map);
 		btf_put(map->btf);
 		INIT_WORK(&map->work, bpf_map_free_deferred);
 		/* Avoid spawning kworkers, since they all might contend
@@ -721,11 +715,6 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 		queue_work(system_unbound_wq, &map->work);
 	}
 }
-
-void bpf_map_put(struct bpf_map *map)
-{
-	__bpf_map_put(map, true);
-}
 EXPORT_SYMBOL_GPL(bpf_map_put);
 
 void bpf_map_put_with_uref(struct bpf_map *map)
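Note on the deleted else branches in bpf_map_free_id(): __acquire() and
__release() are sparse lock-context annotations that compile to nothing at
runtime; they only told the static checker that the do_idr_lock = false path
ran with map_idr_lock already held by the caller. A sketch of that general
pattern (hypothetical helper, not from this commit):

	/* Hypothetical: a removal path that may be entered with the lock
	 * already held. The annotation-only branch keeps sparse's context
	 * tracking balanced; __acquire()/__release() are no-ops at runtime.
	 */
	static void map_id_remove(struct bpf_map *map, bool take_lock)
	{
		unsigned long flags;

		if (take_lock)
			spin_lock_irqsave(&map_idr_lock, flags);
		else
			__acquire(&map_idr_lock);	/* informs sparse only */

		idr_remove(&map_idr, map->id);
		map->id = 0;

		if (take_lock)
			spin_unlock_irqrestore(&map_idr_lock, flags);
		else
			__release(&map_idr_lock);	/* informs sparse only */
	}

Once every caller passes take_lock = true, the branches and annotations are
dead weight, which is exactly the cleanup performed above.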