bpf-for-netdev
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
bpf 2023-01-16

We've added 6 non-merge commits during the last 8 day(s) which contain
a total of 6 files changed, 22 insertions(+), 24 deletions(-).

The main changes are:

1) Mitigate a Spectre v4 leak in unprivileged BPF from speculative
   pointer-as-scalar type confusion, from Luis Gerhorst.

2) Fix a splat when pid 1 attaches a BPF program that attempts to
   send a killing signal to itself, from Hao Sun.

3) Fix BPF program ID information in BPF_AUDIT_UNLOAD as well as
   PERF_BPF_EVENT_PROG_UNLOAD events, from Paul Moore.

4) Fix a BPF verifier warning triggered by an invalid kfunc call in
   backtrack_insn, also from Hao Sun.

5) Fix a potential deadlock in htab_lock_bucket when two hashes select
   the same bucket but different map_locked indices, from Tonghao Zhang.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation
  bpf: hash map, avoid deadlock with suitable hash mask
  bpf: remove the do_idr_lock parameter from bpf_prog_free_id()
  bpf: restore the ebpf program ID for BPF_AUDIT_UNLOAD and PERF_BPF_EVENT_PROG_UNLOAD
  bpf: Skip task with pid=1 in send_signal_common()
  bpf: Skip invalid kfunc call in backtrack_insn
====================

Link: https://lore.kernel.org/r/20230116230745.21742-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 423c1d363c
include/linux/bpf.h

@@ -1832,7 +1832,7 @@ void bpf_prog_inc(struct bpf_prog *prog);
 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_prog_free_id(struct bpf_prog *prog);
 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
 
 struct btf_field *btf_record_find(const struct btf_record *rec,
kernel/bpf/hashtab.c

@@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
 {
        unsigned long flags;
 
-       hash = hash & HASHTAB_MAP_LOCK_MASK;
+       hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 
        preempt_disable();
        if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
@@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
                                      struct bucket *b, u32 hash,
                                      unsigned long flags)
 {
-       hash = hash & HASHTAB_MAP_LOCK_MASK;
+       hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
        raw_spin_unlock_irqrestore(&b->raw_lock, flags);
        __this_cpu_dec(*(htab->map_locked[hash]));
        preempt_enable();
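A note on the two hashtab.c hunks above: htab_lock_bucket() pairs each bucket's raw spinlock with a per-CPU re-entrancy counter in map_locked[], indexed by hash & HASHTAB_MAP_LOCK_MASK (8 counters), while the bucket itself is selected with hash & (n_buckets - 1). On maps with fewer than 8 buckets the two masks diverge, so two hashes can land on the same bucket while bumping different map_locked counters; a re-entrant update on the same CPU (e.g. a tracing program interrupting a map update) then tries to take the same bucket lock twice and deadlocks. The fix masks the lock index with min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1) so it can never be finer-grained than the bucket index. A small userspace sketch of the index arithmetic (the HASHTAB_MAP_LOCK_* values mirror kernel/bpf/hashtab.c; the tiny n_buckets and hash values are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define HASHTAB_MAP_LOCK_COUNT 8
#define HASHTAB_MAP_LOCK_MASK  (HASHTAB_MAP_LOCK_COUNT - 1)
#define MIN_U32(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        uint32_t n_buckets = 4;               /* toy map: bucket mask is 3 */
        uint32_t hashes[] = { 1, 5 };

        for (int i = 0; i < 2; i++) {
                uint32_t h = hashes[i];
                uint32_t bucket   = h & (n_buckets - 1);
                uint32_t lock_old = h & HASHTAB_MAP_LOCK_MASK;
                uint32_t lock_new = h & MIN_U32(HASHTAB_MAP_LOCK_MASK, n_buckets - 1);

                printf("hash %u: bucket %u, old lock idx %u, new lock idx %u\n",
                       h, bucket, lock_old, lock_new);
        }

        /* Hashes 1 and 5 share bucket 1, but the old scheme gives lock
         * indexes 1 and 5, so the per-CPU counter meant to stop the same
         * bucket lock being re-taken on one CPU never trips; the new
         * scheme maps both hashes to lock index 1.
         */
        return 0;
}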
kernel/bpf/offload.c

@@ -216,9 +216,6 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
        if (offload->dev_state)
                offload->offdev->ops->destroy(prog);
 
-       /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
-       bpf_prog_free_id(prog, true);
-
        list_del_init(&offload->offloads);
        kfree(offload);
        prog->aux->offload = NULL;
kernel/bpf/syscall.c

@@ -1972,7 +1972,7 @@ static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
                return;
        if (audit_enabled == AUDIT_OFF)
                return;
-       if (op == BPF_AUDIT_LOAD)
+       if (!in_irq() && !irqs_disabled())
                ctx = audit_context();
        ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
        if (unlikely(!ab))
@@ -2001,7 +2001,7 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
        return id > 0 ? 0 : id;
 }
 
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog)
 {
        unsigned long flags;
 
@@ -2013,18 +2013,10 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
        if (!prog->aux->id)
                return;
 
-       if (do_idr_lock)
-               spin_lock_irqsave(&prog_idr_lock, flags);
-       else
-               __acquire(&prog_idr_lock);
-
+       spin_lock_irqsave(&prog_idr_lock, flags);
        idr_remove(&prog_idr, prog->aux->id);
        prog->aux->id = 0;
-
-       if (do_idr_lock)
-               spin_unlock_irqrestore(&prog_idr_lock, flags);
-       else
-               __release(&prog_idr_lock);
+       spin_unlock_irqrestore(&prog_idr_lock, flags);
 }
 
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
@@ -2067,17 +2059,15 @@ static void bpf_prog_put_deferred(struct work_struct *work)
        prog = aux->prog;
        perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
        bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
+       bpf_prog_free_id(prog);
        __bpf_prog_put_noref(prog, true);
 }
 
-static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
+static void __bpf_prog_put(struct bpf_prog *prog)
 {
        struct bpf_prog_aux *aux = prog->aux;
 
        if (atomic64_dec_and_test(&aux->refcnt)) {
-               /* bpf_prog_free_id() must be called first */
-               bpf_prog_free_id(prog, do_idr_lock);
-
                if (in_irq() || irqs_disabled()) {
                        INIT_WORK(&aux->work, bpf_prog_put_deferred);
                        schedule_work(&aux->work);
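The hunk above is the core of the prog-ID fix: previously __bpf_prog_put() called bpf_prog_free_id() before deferring the rest of the teardown, so by the time bpf_prog_put_deferred() emitted the PERF_BPF_EVENT_PROG_UNLOAD and BPF_AUDIT_UNLOAD notifications, prog->aux->id had already been zeroed and both events reported an ID of 0. Freeing the ID only after the notifications keeps it valid in the records consumers see. For reference, a sketch of the perf record whose id field was affected; the layout is paraphrased from the PERF_RECORD_BPF_EVENT description in include/uapi/linux/perf_event.h (trailing sample_id omitted), and the struct name is mine:

#include <linux/perf_event.h>   /* struct perf_event_header */
#include <linux/types.h>

struct bpf_unload_record {
        struct perf_event_header header; /* header.type == PERF_RECORD_BPF_EVENT */
        __u16 type;                      /* PERF_BPF_EVENT_PROG_UNLOAD */
        __u16 flags;
        __u32 id;                        /* prog ID; reported as 0 before this fix */
        __u8  tag[8];                    /* BPF_TAG_SIZE bytes of the prog tag */
};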
@@ -2089,7 +2079,7 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 
 void bpf_prog_put(struct bpf_prog *prog)
 {
-       __bpf_prog_put(prog, true);
+       __bpf_prog_put(prog);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_put);
 
kernel/bpf/verifier.c

@@ -2748,6 +2748,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                 */
                if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
                        return -ENOTSUPP;
+               /* kfunc with imm==0 is invalid and fixup_kfunc_call will
+                * catch this error later. Make backtracking conservative
+                * with ENOTSUPP.
+                */
+               if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
+                       return -ENOTSUPP;
                /* regular helper call sets R0 */
                *reg_mask &= ~1;
                if (*reg_mask & 0x3f) {
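The shape of instruction the new backtrack_insn() check guards against is a BPF_CALL whose src_reg marks it as a kfunc call but whose imm (the kfunc BTF ID) is zero; fixup_kfunc_call() rejects such a program later, but backtracking could encounter it first and trip a verifier warning. A hedged sketch of that instruction using only UAPI definitions (the variable name is mine):

#include <linux/bpf.h>  /* struct bpf_insn, BPF_JMP, BPF_CALL, BPF_PSEUDO_KFUNC_CALL */

static const struct bpf_insn bogus_kfunc_call = {
        .code    = BPF_JMP | BPF_CALL,
        .dst_reg = 0,
        .src_reg = BPF_PSEUDO_KFUNC_CALL,
        .off     = 0,
        .imm     = 0,   /* invalid: a kfunc BTF ID of zero */
};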
@@ -3289,7 +3295,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
        bool sanitize = reg && is_spillable_regtype(reg->type);
 
        for (i = 0; i < size; i++) {
-               if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+               u8 type = state->stack[spi].slot_type[i];
+
+               if (type != STACK_MISC && type != STACK_ZERO) {
                        sanitize = true;
                        break;
                }
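This is the Spectre v4 hunk from the summary above: check_stack_write_fixed_off() only forced sanitization of a stack store when the target slot was STACK_INVALID, so overwriting a slot that still held a spilled pointer (STACK_SPILL) was left unprotected. Under speculative store bypass the overwrite can be transiently skipped, so a later load that the verifier tracks as a scalar may actually return the stale pointer, which unprivileged BPF could then leak through a side channel. The new condition sanitizes every store unless the slot already holds plain data (STACK_MISC or STACK_ZERO). An illustrative BPF pseudo-assembly sequence of the pattern being closed (register choices are mine, not taken from the patch):

        *(u64 *)(r10 - 8) = r1  /* spill a pointer to the stack */
        *(u64 *)(r10 - 8) = r2  /* overwrite it with an attacker-controlled scalar */
        r3 = *(u64 *)(r10 - 8)  /* tracked as a scalar, but may transiently read the old pointer */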
kernel/trace/bpf_trace.c

@@ -848,6 +848,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
                return -EPERM;
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;
+       /* Task should not be pid=1 to avoid kernel panic. */
+       if (unlikely(is_global_init(current)))
+               return -EPERM;
 
        if (irqs_disabled()) {
                /* Do an early check on signal validity. Otherwise,
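The is_global_init() check above covers the pid 1 case from the summary: a BPF program can ask the kernel to deliver a signal to the task that triggered it, and if init itself attaches and triggers a program that sends SIGKILL, killing init panics the kernel. A hedged sketch of the kind of program involved (not the reproducer from the series; the tracepoint choice is arbitrary):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int send_sigkill_to_self(void *ctx)
{
        /* 9 == SIGKILL, delivered to the current task. With this fix the
         * helper returns -EPERM when the current task is pid 1.
         */
        bpf_send_signal(9);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";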