bpf: drop unnecessary context cast from BPF_PROG_RUN
For a long time now, bpf_func has not taken only a struct sk_buff * as input. Make the context argument generic as void *, so that callers don't need to cast it each time they call BPF_PROG_RUN(). Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
e373909927
commit
88575199cc
|
@ -1518,7 +1518,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
|
||||||
xdp.data = data;
|
xdp.data = data;
|
||||||
xdp.data_end = data + len;
|
xdp.data_end = data + len;
|
||||||
|
|
||||||
return BPF_PROG_RUN(prog, (void *)&xdp);
|
return BPF_PROG_RUN(prog, &xdp);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -408,8 +408,8 @@ struct bpf_prog {
|
||||||
enum bpf_prog_type type; /* Type of BPF program */
|
enum bpf_prog_type type; /* Type of BPF program */
|
||||||
struct bpf_prog_aux *aux; /* Auxiliary fields */
|
struct bpf_prog_aux *aux; /* Auxiliary fields */
|
||||||
struct sock_fprog_kern *orig_prog; /* Original BPF program */
|
struct sock_fprog_kern *orig_prog; /* Original BPF program */
|
||||||
unsigned int (*bpf_func)(const struct sk_buff *skb,
|
unsigned int (*bpf_func)(const void *ctx,
|
||||||
const struct bpf_insn *filter);
|
const struct bpf_insn *insn);
|
||||||
/* Instructions for interpreter */
|
/* Instructions for interpreter */
|
||||||
union {
|
union {
|
||||||
struct sock_filter insns[0];
|
struct sock_filter insns[0];
|
||||||
|
@ -504,7 +504,7 @@ static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
|
||||||
u32 ret;
|
u32 ret;
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
ret = BPF_PROG_RUN(prog, (void *)xdp);
|
ret = BPF_PROG_RUN(prog, xdp);
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -7726,7 +7726,7 @@ static void bpf_overflow_handler(struct perf_event *event,
|
||||||
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
|
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
|
||||||
goto out;
|
goto out;
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
ret = BPF_PROG_RUN(event->prog, (void *)&ctx);
|
ret = BPF_PROG_RUN(event->prog, &ctx);
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
out:
|
out:
|
||||||
__this_cpu_dec(bpf_prog_active);
|
__this_cpu_dec(bpf_prog_active);
|
||||||
|
|
|
@ -195,7 +195,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd)
|
||||||
* value always takes priority (ignoring the DATA).
|
* value always takes priority (ignoring the DATA).
|
||||||
*/
|
*/
|
||||||
for (; f; f = f->prev) {
|
for (; f; f = f->prev) {
|
||||||
u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);
|
u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
|
||||||
|
|
||||||
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
|
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
|
||||||
ret = cur_ret;
|
ret = cur_ret;
|
||||||
|
|
Loading…
Reference in New Issue