Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-03-21

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Follow-up fix to the fault injection framework that prevents the
   kprobe from being jump-optimized by installing a dummy post-handler,
   from Masami.

2) Drop the bpf_perf_prog_read_value helper from tracepoint type programs,
   where it was mistakenly added and would otherwise crash due to a
   wrong input context, from Yonghong.

3) Fix a crash in BPF fs when compiled with clang. The code itself is
   fine; clang just optimizes overly aggressively in ways that do not
   conform to C, so fix the kernel's Makefile to prevent such issues
   in general, from Daniel.

4) Skip unnecessary capability checks in the bpf syscall, which otherwise
   trigger needless security hooks on capability checking and cause
   false alarms about unprivileged processes trying to access
   CAP_SYS_ADMIN-restricted infrastructure, from Chenbo.

5) Fix the test_bpf.ko module when CONFIG_BPF_JIT_ALWAYS_ON is set,
   for a test case that is really only supposed to fail on the x86_64
   JIT but not on others, from Thadeu.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 3d27484eba
Author: David S. Miller <davem@davemloft.net>
Date:   2018-03-21 12:09:04 -04:00

5 changed files with 62 additions and 31 deletions

diff --git a/Makefile b/Makefile

@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
 
+# clang sets -fmerge-all-constants by default as optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
+
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
 
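To make the breakage concrete: under -fmerge-all-constants even distinct
const objects with identical contents may be folded into one, so code that
uses their addresses as identity tags silently misbehaves. Below is a
minimal user-space sketch of that failure mode, assuming it is compiled
with clang -fmerge-all-constants; all names in it are hypothetical, not
taken from the kernel source.

#include <stdio.h>

struct ops { int dummy; };

/* Two distinct objects with identical contents, used as type tags via
 * their addresses. C guarantees distinct objects have distinct addresses,
 * but -fmerge-all-constants may fold them together. */
static const struct ops prog_ops = { 0 };
static const struct ops map_ops  = { 0 };

static const char *kind(const struct ops *op)
{
	if (op == &prog_ops)
		return "prog";
	if (op == &map_ops)
		return "map";
	return "unknown";
}

int main(void)
{
	/* Expected output is "prog map"; with merged constants both address
	 * comparisons can resolve to the same object and the tags collapse. */
	printf("%s %s\n", kind(&prog_ops), kind(&map_ops));
	return 0;
}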

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c

@@ -1845,7 +1845,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	union bpf_attr attr = {};
 	int err;
 
-	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
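The fix relies on C's left-to-right short-circuit evaluation of &&:
reading the cheap sysctl first means capable(), which fires the LSM/audit
capability hooks as a side effect, only runs when unprivileged BPF is
actually disabled. A minimal user-space sketch of that ordering effect
follows; capable_cap_sys_admin() and the sysctl variable are hypothetical
stand-ins for the kernel's capable(CAP_SYS_ADMIN) and
sysctl_unprivileged_bpf_disabled.

#include <stdbool.h>
#include <stdio.h>

static int sysctl_unprivileged_bpf_disabled;	/* stand-in sysctl, off */

static bool capable_cap_sys_admin(void)
{
	/* In the kernel, capable() runs security/audit hooks as a side
	 * effect, which is exactly what the reordering avoids. */
	puts("capability hook fired");
	return true;
}

int main(void)
{
	/* Old order: the hook fires on every call, even when the sysctl
	 * makes its result irrelevant. */
	if (!capable_cap_sys_admin() && sysctl_unprivileged_bpf_disabled)
		return 1;

	/* New order: with the sysctl at 0, && short-circuits and the hook
	 * never runs. */
	if (sysctl_unprivileged_bpf_disabled && !capable_cap_sys_admin())
		return 1;

	return 0;
}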

diff --git a/kernel/fail_function.c b/kernel/fail_function.c

@@ -14,6 +14,15 @@
 
 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
 
+static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs,
+			     unsigned long flags)
+{
+	/*
+	 * A dummy post handler is required to prohibit optimizing, because
+	 * jump optimization does not support execution path overriding.
+	 */
+}
+
 struct fei_attr {
 	struct list_head list;
 	struct kprobe kp;
@@ -56,6 +65,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
 			return NULL;
 		}
 		attr->kp.pre_handler = fei_kprobe_handler;
+		attr->kp.post_handler = fei_post_handler;
 		attr->retval = adjust_error_retval(addr, 0);
 		INIT_LIST_HEAD(&attr->list);
 	}
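For context, kprobes are jump-optimized where possible (the breakpoint is
replaced by a jump to a trampoline), but an optimized probe cannot honor a
pre-handler that redirects execution the way fault injection does;
registering any post_handler disqualifies a kprobe from optimization.
Below is a minimal stand-alone module sketch of the same pattern — the
probe target and all demo_* names are hypothetical; only the kprobes API
calls are real.

#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *kp, struct pt_regs *regs)
{
	/* A real fault-injection handler would override the execution path
	 * here, e.g. point regs at an error-returning trampoline. */
	return 0;
}

static void demo_post(struct kprobe *kp, struct pt_regs *regs,
		      unsigned long flags)
{
	/* Intentionally empty: its mere presence prohibits jump optimization,
	 * forcing the breakpoint-based path that supports path overriding. */
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_sys_open",	/* example probe target */
	.pre_handler	= demo_pre,
	.post_handler	= demo_post,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");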

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c

@@ -661,32 +661,6 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
-	   struct bpf_perf_event_value *, buf, u32, size)
-{
-	int err = -EINVAL;
-
-	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
-		goto clear;
-	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
-				    &buf->running);
-	if (unlikely(err))
-		goto clear;
-	return 0;
-clear:
-	memset(buf, 0, size);
-	return err;
-}
-
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
-	.func		= bpf_perf_prog_read_value_tp,
-	.gpl_only	= true,
-	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
-	.arg3_type	= ARG_CONST_SIZE,
-};
-
 static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -694,8 +668,6 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 		return &bpf_perf_event_output_proto_tp;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
-	case BPF_FUNC_perf_prog_read_value:
-		return &bpf_perf_prog_read_value_proto_tp;
 	default:
 		return tracing_func_proto(func_id);
 	}
@@ -723,6 +695,46 @@ const struct bpf_verifier_ops tracepoint_verifier_ops = {
 const struct bpf_prog_ops tracepoint_prog_ops = {
 };
 
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
+	   struct bpf_perf_event_value *, buf, u32, size)
+{
+	int err = -EINVAL;
+
+	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
+		goto clear;
+	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
+				    &buf->running);
+	if (unlikely(err))
+		goto clear;
+	return 0;
+clear:
+	memset(buf, 0, size);
+	return err;
+}
+
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+	.func		= bpf_perf_prog_read_value,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
+};
+
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	case BPF_FUNC_perf_prog_read_value:
+		return &bpf_perf_prog_read_value_proto;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -779,7 +791,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_verifier_ops perf_event_verifier_ops = {
-	.get_func_proto		= tp_prog_func_proto,
+	.get_func_proto		= pe_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
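The crash fixed here stems from the helper dereferencing its context as
struct bpf_perf_event_data_kern to reach ctx->event, which only perf event
programs actually receive; a tracepoint program's context points at the
raw tracepoint record instead. A minimal sketch of legitimate usage from a
BPF_PROG_TYPE_PERF_EVENT program follows — the helper declaration mirrors
the era's samples/bpf-style bpf_helpers.h, and the section names, function
name, and build setup (clang -target bpf) are assumptions of this example,
not part of the patch.

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>

/* Helper declaration in the style of the era's bpf_helpers.h. */
static int (*bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx,
				       struct bpf_perf_event_value *buf,
				       unsigned int size) =
	(void *) BPF_FUNC_perf_prog_read_value;

__attribute__((section("perf_event"), used))
int read_counter(struct bpf_perf_event_data *ctx)
{
	struct bpf_perf_event_value value = {};

	/* Valid here: a perf_event program's ctx really is backed by a
	 * struct bpf_perf_event_data_kern with a ctx->event to read. From
	 * a tracepoint program the same call would misinterpret the raw
	 * tracepoint record as that struct, hence the crash being fixed. */
	bpf_perf_prog_read_value(ctx, &value, sizeof(value));

	return 0;
}

char _license[] __attribute__((section("license"), used)) = "GPL";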

diff --git a/lib/test_bpf.c b/lib/test_bpf.c

@@ -5467,7 +5467,7 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
 		CLASSIC | FLAG_NO_DATA,
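A note on the flag semantics, since the fix hinges on them: the module
counts a test as passing on load failure exactly when it is marked
expected-to-fail, so FLAG_EXPECTED_FAIL must be set for precisely the
configurations where loading really fails (here, x86 with
CONFIG_BPF_JIT_ALWAYS_ON and no interpreter fallback). An illustrative
sketch of that contract follows; it is not the actual lib/test_bpf.c
runner code, and the flag value is a stand-in.

#include <stdbool.h>

#define FLAG_EXPECTED_FAIL	(1 << 1)	/* stand-in for the module's flag */

static bool test_passed(int load_err, unsigned int flags)
{
	if (load_err)
		return flags & FLAG_EXPECTED_FAIL;	/* anticipated failure */
	return !(flags & FLAG_EXPECTED_FAIL);		/* anticipated success */
}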