bpf: Avoid deadlock when using queue and stack maps from NMI
[ Upstream commit a34a9f1a19 ]

Syzbot discovered that the queue and stack maps can deadlock if they are
being used from a BPF program that can be called from NMI context (such as
one that is attached to a perf HW counter event). To fix this, add an
in_nmi() check and use raw_spin_trylock() in NMI context, erroring out if
grabbing the lock fails.

Fixes: f1a2e44a3a ("bpf: add queue and stack maps")
Reported-by: Hsin-Wei Hung <hsinweih@uci.edu>
Tested-by: Hsin-Wei Hung <hsinweih@uci.edu>
Co-developed-by: Hsin-Wei Hung <hsinweih@uci.edu>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20230911132815.717240-1-toke@redhat.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
commit ecab4d8a7f
parent a52663f05b
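The hunks below apply the same pattern in three places: __queue_map_get(),
__stack_map_get() and queue_stack_map_push_elem(). For illustration only,
here is a minimal userspace C sketch of that pattern; pthread_mutex_trylock()
stands in for raw_spin_trylock_irqsave() and a plain flag stands in for
in_nmi(). This is an assumed analogue, not the kernel code:

/*
 * Userspace analogue of the NMI-safe locking pattern in this patch.
 * Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t qs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for in_nmi(): pretend the caller runs in a context that must
 * never wait for a lock (hypothetical flag, not a real kernel API). */
static bool in_nmi_context = true;

static int push_elem(int value)
{
	if (in_nmi_context) {
		/* Never wait for the lock in NMI-like context: fail fast. */
		if (pthread_mutex_trylock(&qs_lock) != 0)
			return -EBUSY;
	} else {
		pthread_mutex_lock(&qs_lock);
	}

	/* ... the real code updates the queue/stack map here ... */
	printf("pushed %d\n", value);

	pthread_mutex_unlock(&qs_lock);
	return 0;
}

int main(void)
{
	/* Simulate the deadlock scenario: the lock is already held when the
	 * NMI-like path runs, so trylock fails and we return -EBUSY instead
	 * of spinning on a lock the interrupted code can never release. */
	pthread_mutex_lock(&qs_lock);
	printf("push while lock held: %d\n", push_elem(1));	/* -EBUSY (-16) */
	pthread_mutex_unlock(&qs_lock);

	printf("push with lock free:  %d\n", push_elem(2));	/* 0 */
	return 0;
}

The key property is that the NMI path never waits for a lock the interrupted
code may already hold; it fails fast with -EBUSY, which the map operation
then returns to the calling BPF program.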
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -118,7 +118,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, flags);
+	}
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -148,7 +153,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	void *ptr;
 	u32 index;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, flags);
+	}
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -213,7 +223,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	}
 
 	if (queue_stack_map_is_full(qs)) {
 		if (!replace) {