Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2020-04-10

The following pull-request contains BPF updates for your *net* tree.

We've added 13 non-merge commits during the last 7 day(s) which contain
a total of 13 files changed, 137 insertions(+), 43 deletions(-).

The main changes are:

1) JIT code emission fixes for riscv and arm32, from Luke Nelson and
   Xi Wang.

2) Disable vmlinux BTF info if GCC_PLUGIN_RANDSTRUCT is used, from
   Slava Bacherikov.

3) Fix oob write in AF_XDP when meta data is used, from Li RongQing.

4) Fix bpf_get_link_xdp_id() handling on single prog when flags are
   specified, from Andrey Ignatov.

5) Fix sk_assign() BPF helper for request sockets that can have
   sk_reuseport field uninitialized, from Joe Stringer.

6) Fix mprotect() test case for the BPF LSM, from KP Singh.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 40fc7ad2c8
@@ -929,7 +929,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[],
 	rd = arm_bpf_get_reg64(dst, tmp, ctx);

 	/* Do LSR operation */
-	if (val < 32) {
+	if (val == 0) {
+		/* An immediate value of 0 encodes a shift amount of 32
+		 * for LSR. To shift by 0, don't do anything.
+		 */
+	} else if (val < 32) {
 		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
 		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
 		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
@@ -955,7 +959,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[],
 	rd = arm_bpf_get_reg64(dst, tmp, ctx);

 	/* Do ARSH operation */
-	if (val < 32) {
+	if (val == 0) {
+		/* An immediate value of 0 encodes a shift amount of 32
+		 * for ASR. To shift by 0, don't do anything.
+		 */
+	} else if (val < 32) {
 		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
 		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
 		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
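Background for the two arm32 hunks above (an aside, not part of the patch): ARM's immediate shift encoding has no way to express a shift by 0 for LSR/ASR, since an immediate of 0 in those fields means a shift by 32, while BPF defines a shift by 0 as leaving the register untouched, so the JIT has to special-case val == 0 instead of emitting the encoding literally. A minimal user-space model of the 64-bit logical right shift lowered onto 32-bit halves, with made-up helper names:

#include <stdint.h>
#include <stdio.h>

/* Model of lowering a 64-bit logical right shift by an immediate onto
 * 32-bit register halves, mirroring the case split in the JIT above. */
static uint64_t rsh64_by_imm(uint32_t hi, uint32_t lo, unsigned int val)
{
	uint32_t new_hi, new_lo;

	if (val == 0) {			/* shift by 0: leave both halves alone */
		new_hi = hi;
		new_lo = lo;
	} else if (val < 32) {		/* bits spill from hi into lo */
		new_lo = (lo >> val) | (hi << (32 - val));
		new_hi = hi >> val;
	} else if (val == 32) {		/* lo takes hi, hi becomes zero */
		new_lo = hi;
		new_hi = 0;
	} else {			/* 32 < val < 64 */
		new_lo = hi >> (val - 32);
		new_hi = 0;
	}
	return ((uint64_t)new_hi << 32) | new_lo;
}

int main(void)
{
	uint64_t x = 0x123456789abcdef0ULL;
	unsigned int v;

	for (v = 0; v < 64; v += 7)
		printf("x >> %2u: %016llx (lowered) vs %016llx (native)\n", v,
		       (unsigned long long)rsh64_by_imm(x >> 32, (uint32_t)x, v),
		       (unsigned long long)(x >> v));
	return 0;
}

Running it compares the lowered result against the native 64-bit shift for a few shift amounts, including 0.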
@@ -55,7 +55,7 @@ config RISCV
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MMIOWB
 	select ARCH_HAS_DEBUG_VIRTUAL
-	select HAVE_EBPF_JIT
+	select HAVE_EBPF_JIT if MMU
 	select EDAC_SUPPORT
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
@@ -110,6 +110,16 @@ static bool is_32b_int(s64 val)
 	return -(1L << 31) <= val && val < (1L << 31);
 }

+static bool in_auipc_jalr_range(s64 val)
+{
+	/*
+	 * auipc+jalr can reach any signed PC-relative offset in the range
+	 * [-2^31 - 2^11, 2^31 - 2^11).
+	 */
+	return (-(1L << 31) - (1L << 11)) <= val &&
+		val < ((1L << 31) - (1L << 11));
+}
+
 static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
 {
 	/* Note that the immediate from the add is sign-extended,
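As a standalone sanity check of the range comment in the new helper (a user-space sketch, not kernel code): the auipc immediate covers the offset rounded to a multiple of 4096, and jalr re-adds the signed low 12 bits, so any offset inside [-2^31 - 2^11, 2^31 - 2^11) reassembles exactly.

#include <assert.h>
#include <stdint.h>

static int64_t sext12(int64_t v)
{
	v &= 0xfff;
	return (v & 0x800) ? v - 0x1000 : v;	/* sign-extend a 12-bit field */
}

static int in_auipc_jalr_range(int64_t val)
{
	return (-(1LL << 31) - (1LL << 11)) <= val &&
	       val < ((1LL << 31) - (1LL << 11));
}

/* Rebuild the PC-relative offset that the auipc/jalr pair would apply. */
static int64_t encode_decode(int64_t rvoff)
{
	int64_t upper = (rvoff + (1 << 11)) >> 12;	/* auipc immediate, rounded */
	int64_t lower = rvoff & 0xfff;			/* jalr immediate, low 12 bits */

	return (int64_t)(int32_t)(upper << 12) + sext12(lower);
}

int main(void)
{
	int64_t samples[] = {
		0, 1, -1, 2047, 2048, -2048, -2049,
		(1LL << 31) - (1LL << 11) - 1,	/* largest reachable offset */
		-(1LL << 31) - (1LL << 11),	/* smallest reachable offset */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		assert(in_auipc_jalr_range(samples[i]));
		assert(encode_decode(samples[i]) == samples[i]);
	}
	return 0;
}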
@@ -380,20 +390,24 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
 	*rd = RV_REG_T2;
 }

-static void emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
-			       struct rv_jit_context *ctx)
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
+			      struct rv_jit_context *ctx)
 {
 	s64 upper, lower;

 	if (rvoff && is_21b_int(rvoff) && !force_jalr) {
 		emit(rv_jal(rd, rvoff >> 1), ctx);
-		return;
+		return 0;
+	} else if (in_auipc_jalr_range(rvoff)) {
+		upper = (rvoff + (1 << 11)) >> 12;
+		lower = rvoff & 0xfff;
+		emit(rv_auipc(RV_REG_T1, upper), ctx);
+		emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+		return 0;
 	}

-	upper = (rvoff + (1 << 11)) >> 12;
-	lower = rvoff & 0xfff;
-	emit(rv_auipc(RV_REG_T1, upper), ctx);
-	emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+	pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
+	return -ERANGE;
 }

 static bool is_signed_bpf_cond(u8 cond)
@@ -407,18 +421,16 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
 	s64 off = 0;
 	u64 ip;
 	u8 rd;
+	int ret;

 	if (addr && ctx->insns) {
 		ip = (u64)(long)(ctx->insns + ctx->ninsns);
 		off = addr - ip;
-		if (!is_32b_int(off)) {
-			pr_err("bpf-jit: target call addr %pK is out of range\n",
-			       (void *)addr);
-			return -ERANGE;
-		}
 	}

-	emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
+	ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
+	if (ret)
+		return ret;
 	rd = bpf_to_rv_reg(BPF_REG_0, ctx);
 	emit(rv_addi(rd, RV_REG_A0, 0), ctx);
 	return 0;
@@ -429,7 +441,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 {
 	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
		    BPF_CLASS(insn->code) == BPF_JMP;
-	int s, e, rvoff, i = insn - ctx->prog->insnsi;
+	int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
 	struct bpf_prog_aux *aux = ctx->prog->aux;
 	u8 rd = -1, rs = -1, code = insn->code;
 	s16 off = insn->off;
@@ -699,7 +711,9 @@ out_be:
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
 		rvoff = rv_offset(i, off, ctx);
-		emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+		if (ret)
+			return ret;
 		break;

 	/* IF (dst COND src) JUMP off */
@@ -801,7 +815,6 @@ out_be:
 	case BPF_JMP | BPF_CALL:
 	{
 		bool fixed;
-		int ret;
 		u64 addr;

 		mark_call(ctx);
@@ -826,7 +839,9 @@ out_be:
			break;

 		rvoff = epilogue_offset(ctx);
-		emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+		if (ret)
+			return ret;
 		break;

 	/* dst = imm64 */
@@ -30,7 +30,7 @@ struct bpf_lru_node {
 struct bpf_lru_list {
 	struct list_head lists[NR_BPF_LRU_LIST_T];
 	unsigned int counts[NR_BPF_LRU_LIST_COUNT];
-	/* The next inacitve list rotation starts from here */
+	/* The next inactive list rotation starts from here */
 	struct list_head *next_inactive_rotation;

 	raw_spinlock_t lock ____cacheline_aligned_in_smp;
@@ -242,6 +242,8 @@ config DEBUG_INFO_DWARF4
 config DEBUG_INFO_BTF
 	bool "Generate BTF typeinfo"
 	depends on DEBUG_INFO
+	depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+	depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
 	help
 	  Generate deduplicated BTF type information from DWARF debug info.
 	  Turning this on expects presence of pahole tool, which will convert
@@ -5925,7 +5925,7 @@ BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
		return -EOPNOTSUPP;
	if (unlikely(dev_net(skb->dev) != sock_net(sk)))
		return -ENETUNREACH;
-	if (unlikely(sk->sk_reuseport))
+	if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
		return -ESOCKTNOSUPPORT;
	if (sk_is_refcounted(sk) &&
	    unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
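As the pull message notes, request sockets can reach this helper with the sk_reuseport field uninitialized, so the field may only be examined once the socket is known to be a full socket. A small illustrative sketch of that guard ordering, with mock types only, not the kernel's socket structures:

#include <assert.h>
#include <stdbool.h>

enum sock_kind { FULL_SOCK, REQUEST_SOCK, TIMEWAIT_SOCK };

struct mini_sock {
	enum sock_kind kind;
	bool reuseport;		/* only meaningful for FULL_SOCK */
};

static bool sk_fullsock(const struct mini_sock *sk)
{
	return sk->kind == FULL_SOCK;
}

/* Mirrors the fixed check: reject reuseport sockets, but never inspect the
 * flag on request/timewait minisocks where it may be uninitialized. */
static int may_assign(const struct mini_sock *sk)
{
	if (sk_fullsock(sk) && sk->reuseport)
		return -1;	/* -ESOCKTNOSUPPORT in the helper */
	return 0;
}

int main(void)
{
	/* reuseport is set to a garbage value here to show it is ignored. */
	struct mini_sock req  = { .kind = REQUEST_SOCK, .reuseport = true };
	struct mini_sock full = { .kind = FULL_SOCK,    .reuseport = true };

	assert(may_assign(&req) == 0);	/* minisock: flag never read */
	assert(may_assign(&full) == -1);	/* reuseport listener: rejected */
	return 0;
}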
@@ -1872,7 +1872,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
		 * as not suitable for copying when cloning.
		 */
		if (sk_user_data_is_nocopy(newsk))
-			RCU_INIT_POINTER(newsk->sk_user_data, NULL);
+			newsk->sk_user_data = NULL;

		newsk->sk_err	  = 0;
		newsk->sk_err_soft = 0;
@@ -131,8 +131,9 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

-		memcpy(to_buf, from_buf, first_len + metalen);
-		memcpy(next_pg_addr, from_buf + first_len, len - first_len);
+		memcpy(to_buf, from_buf, first_len);
+		memcpy(next_pg_addr, from_buf + first_len,
+		       len + metalen - first_len);

		return;
	}
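To see what the hunk above changes (a user-space model with assumed sizes, not the kernel function): when the data plus its metadata straddles a page boundary, only first_len bytes fit in the first page; the old code copied first_len + metalen there, writing metalen bytes past the page, and shorted the second copy by the same amount.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Copy len + metalen bytes starting at offset 'addr', splitting the copy
 * across two pages the way the fixed kernel code does. */
static void rcv_memcpy(uint8_t *to_buf, uint8_t *next_pg_addr, uint64_t addr,
		       const uint8_t *from_buf, size_t len, size_t metalen)
{
	uint64_t page_start = addr & ~(uint64_t)(PAGE_SIZE - 1);
	uint64_t first_len = PAGE_SIZE - (addr - page_start);

	if (len + metalen > first_len) {
		/* Fill what is left of the first page; the remainder
		 * (len + metalen - first_len) belongs to the next page. */
		memcpy(to_buf, from_buf, first_len);
		memcpy(next_pg_addr, from_buf + first_len,
		       len + metalen - first_len);
	} else {
		memcpy(to_buf, from_buf, len + metalen);
	}
}

int main(void)
{
	static uint8_t page0[PAGE_SIZE], page1[PAGE_SIZE], frame[512];
	uint64_t addr = PAGE_SIZE - 100;	/* 100 bytes left in page0 */
	size_t len = 300, metalen = 64;
	size_t i;

	for (i = 0; i < sizeof(frame); i++)
		frame[i] = (uint8_t)i;

	rcv_memcpy(page0 + (addr & (PAGE_SIZE - 1)), page1, addr,
		   frame, len, metalen);

	/* 100 bytes land at the end of page0, the remaining 264 at the
	 * start of page1; nothing is written past the end of page0. */
	assert(memcmp(page0 + PAGE_SIZE - 100, frame, 100) == 0);
	assert(memcmp(page1, frame + 100, len + metalen - 100) == 0);
	return 0;
}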
@@ -142,7 +142,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
		struct ifinfomsg ifinfo;
		char             attrbuf[64];
	} req;
-	__u32 nl_pid;
+	__u32 nl_pid = 0;

	sock = libbpf_netlink_open(&nl_pid);
	if (sock < 0)
@@ -288,7 +288,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
 {
	struct xdp_id_md xdp_id = {};
	int sock, ret;
-	__u32 nl_pid;
+	__u32 nl_pid = 0;
	__u32 mask;

	if (flags & ~XDP_FLAGS_MASK || !info_size)
@@ -321,7 +321,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,

 static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
 {
-	if (info->attach_mode != XDP_ATTACHED_MULTI)
+	if (info->attach_mode != XDP_ATTACHED_MULTI && !flags)
		return info->prog_id;
	if (flags & XDP_FLAGS_DRV_MODE)
		return info->drv_prog_id;
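The behavioural change in get_xdp_id() above, modelled as a standalone program. This uses a mock structure and constants, not libbpf itself, and the branches past XDP_FLAGS_DRV_MODE are assumed for the demo: with a single program attached, the shortcut to prog_id is now taken only when the caller passed no mode flag, so asking for a mode that is not attached correctly reports no program.

#include <assert.h>

#define XDP_FLAGS_SKB_MODE	(1U << 1)
#define XDP_FLAGS_DRV_MODE	(1U << 2)

enum {
	XDP_ATTACHED_NONE,
	XDP_ATTACHED_DRV,
	XDP_ATTACHED_SKB,
	XDP_ATTACHED_HW,
	XDP_ATTACHED_MULTI,
};

struct xdp_link_info {
	unsigned int prog_id;
	unsigned int drv_prog_id;
	unsigned int skb_prog_id;
	unsigned int attach_mode;
};

static unsigned int get_xdp_id(const struct xdp_link_info *info,
			       unsigned int flags)
{
	/* Only short-circuit to the single prog_id when the caller did not
	 * ask for a specific mode -- that is the fix in the hunk above. */
	if (info->attach_mode != XDP_ATTACHED_MULTI && !flags)
		return info->prog_id;
	if (flags & XDP_FLAGS_DRV_MODE)
		return info->drv_prog_id;
	if (flags & XDP_FLAGS_SKB_MODE)
		return info->skb_prog_id;
	return 0;
}

int main(void)
{
	/* One program attached in SKB (generic) mode only. */
	struct xdp_link_info info = {
		.prog_id = 42, .skb_prog_id = 42, .drv_prog_id = 0,
		.attach_mode = XDP_ATTACHED_SKB,
	};

	assert(get_xdp_id(&info, 0) == 42);			/* no flags: the one prog */
	assert(get_xdp_id(&info, XDP_FLAGS_SKB_MODE) == 42);	/* matching mode */
	assert(get_xdp_id(&info, XDP_FLAGS_DRV_MODE) == 0);	/* other mode: nothing */
	return 0;
}

Before the fix, the last call would have returned 42 as well, which is what the new xdp_info selftest below exercises.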
@@ -15,7 +15,10 @@

 char *CMD_ARGS[] = {"true", NULL};

-int heap_mprotect(void)
+#define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \
+	(char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1))
+
+int stack_mprotect(void)
 {
	void *buf;
	long sz;
@@ -25,12 +28,9 @@ int heap_mprotect(void)
	if (sz < 0)
		return sz;

-	buf = memalign(sz, 2 * sz);
-	if (buf == NULL)
-		return -ENOMEM;
-
-	ret = mprotect(buf, sz, PROT_READ | PROT_WRITE | PROT_EXEC);
-	free(buf);
+	buf = alloca(sz * 3);
+	ret = mprotect(GET_PAGE_ADDR(buf, sz), sz,
+		       PROT_READ | PROT_WRITE | PROT_EXEC);
	return ret;
 }

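The GET_PAGE_ADDR macro added above rounds an address up to the next page boundary, so the mprotect() call lands on a page-aligned address inside the alloca()'d region. A quick standalone check (4096 is only an assumed page size for the demo):

#include <assert.h>
#include <stdio.h>

#define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \
	(char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1))

int main(void)
{
	long page = 4096;		/* assumed page size for the demo */
	static char region[3 * 4096];	/* stands in for the alloca(sz * 3) buffer */
	char *unaligned = region + 123;
	char *aligned = GET_PAGE_ADDR(unaligned, page);

	printf("%p -> %p\n", (void *)unaligned, (void *)aligned);
	assert(((unsigned long)aligned & (page - 1)) == 0);		/* page-aligned */
	assert(aligned > unaligned && aligned <= unaligned + page);	/* at most one page up */
	return 0;
}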
@@ -73,8 +73,8 @@ void test_test_lsm(void)

	skel->bss->monitored_pid = getpid();

-	err = heap_mprotect();
-	if (CHECK(errno != EPERM, "heap_mprotect", "want errno=EPERM, got %d\n",
+	err = stack_mprotect();
+	if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n",
		  errno))
		goto close_prog;

@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/if_link.h>
+#include <test_progs.h>
+
+#define IFINDEX_LO 1
+
+void test_xdp_info(void)
+{
+	__u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
+	const char *file = "./xdp_dummy.o";
+	struct bpf_prog_info info = {};
+	struct bpf_object *obj;
+	int err, prog_fd;
+
+	/* Get prog_id for XDP_ATTACHED_NONE mode */
+
+	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+	if (CHECK(err, "get_xdp_none", "errno=%d\n", errno))
+		return;
+	if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id))
+		return;
+
+	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+	if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno))
+		return;
+	if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n",
+		  prog_id))
+		return;
+
+	/* Setup prog */
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (CHECK_FAIL(err))
+		return;
+
+	err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
+	if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
+		goto out_close;
+
+	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+	if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno))
+		goto out_close;
+
+	/* Get prog_id for single prog mode */
+
+	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+	if (CHECK(err, "get_xdp", "errno=%d\n", errno))
+		goto out;
+	if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n"))
+		goto out;
+
+	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+	if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno))
+		goto out;
+	if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n"))
+		goto out;
+
+	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_DRV_MODE);
+	if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno))
+		goto out;
+	if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
+		goto out;
+
+out:
+	bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+out_close:
+	bpf_object__close(obj);
+}
@@ -23,12 +23,12 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
		return ret;

	__u32 pid = bpf_get_current_pid_tgid() >> 32;
-	int is_heap = 0;
+	int is_stack = 0;

-	is_heap = (vma->vm_start >= vma->vm_mm->start_brk &&
-		   vma->vm_end <= vma->vm_mm->brk);
+	is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
+		    vma->vm_end >= vma->vm_mm->start_stack);

-	if (is_heap && monitored_pid == pid) {
+	if (is_stack && monitored_pid == pid) {
		mprotect_count++;
		ret = -EPERM;
	}
@@ -501,7 +501,7 @@
	.result = REJECT
 },
 {
-	"bounds check mixed 32bit and 64bit arithmatic. test1",
+	"bounds check mixed 32bit and 64bit arithmetic. test1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_1, -1),
@@ -520,7 +520,7 @@
	.result = ACCEPT
 },
 {
-	"bounds check mixed 32bit and 64bit arithmatic. test2",
+	"bounds check mixed 32bit and 64bit arithmetic. test2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_1, -1),