bpf: Avoid deadlock when using queue and stack maps from NMI

[ Upstream commit a34a9f1a19 ]

Syzbot discovered that the queue and stack maps can deadlock if they are
used from a BPF program that can be called from NMI context (such as one
attached to a perf HW counter event). To fix this, add an in_nmi() check
and use raw_spin_trylock_irqsave() in NMI context, returning -EBUSY if the
lock cannot be taken.
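
For context, the locking pattern the fix introduces looks roughly like the
sketch below. This is illustrative only, not the literal patch: struct
qs_demo and qs_demo_pop() are made-up stand-ins for the real bpf_queue_stack
code, but the in_nmi()/raw_spin_trylock_irqsave() branch mirrors what the
diff adds to each map operation. The key point is that an NMI can interrupt
a context that already holds the map lock on the same CPU, so spinning on
that lock from NMI context can never make progress; the NMI path has to
try-lock once and give up with -EBUSY.

#include <linux/spinlock.h>
#include <linux/hardirq.h>	/* in_nmi() */
#include <linux/errno.h>
#include <linux/types.h>

/* Made-up stand-in for the real bpf_queue_stack map state. */
struct qs_demo {
	raw_spinlock_t lock;
	u32 count;		/* placeholder for head/size/elements */
};

static int qs_demo_pop(struct qs_demo *qs, u32 *value)
{
	unsigned long flags;
	int err = 0;

	if (in_nmi()) {
		/*
		 * The NMI may have interrupted code on this CPU that already
		 * holds qs->lock; spinning here would deadlock, so try once
		 * and report contention to the caller instead.
		 */
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (!qs->count) {
		err = -ENOENT;
		goto out;
	}
	*value = qs->count--;	/* placeholder for the real dequeue */
out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

In the actual patch this branch is open-coded in __queue_map_get(),
__stack_map_get() and queue_stack_map_push_elem(), as the diff below shows.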

Fixes: f1a2e44a3a ("bpf: add queue and stack maps")
Reported-by: Hsin-Wei Hung <hsinweih@uci.edu>
Tested-by: Hsin-Wei Hung <hsinweih@uci.edu>
Co-developed-by: Hsin-Wei Hung <hsinweih@uci.edu>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20230911132815.717240-1-toke@redhat.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
 kernel/bpf/queue_stack_maps.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -118,7 +118,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, flags);
+	}
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -148,7 +153,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	void *ptr;
 	u32 index;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, flags);
+	}
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -213,7 +223,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	}
 
 	if (queue_stack_map_is_full(qs)) {
 		if (!replace) {