bpf: fix narrower loads on s390
The very first check in test_pkt_md_access is failing on s390, which
happens because loading a part of a struct __sk_buff field produces
an incorrect result.
The preprocessed code of the check is:
    {
            __u8 tmp = *((volatile __u8 *)&skb->len +
                         ((sizeof(skb->len) - sizeof(__u8)) / sizeof(__u8)));
            if (tmp != ((*(volatile __u32 *)&skb->len) & 0xFF)) return 2;
    };
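For context, a standalone user-space analogue of the check (an illustration, not the selftest itself): on a big-endian machine such as s390, byte 3 of a 32-bit value is its least significant byte, which is why the comparison is expected to hold.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t len = 0x11223344;
            /* Byte at offset 3: the LSB on big-endian (0x44),
             * the MSB on little-endian (0x11).
             */
            uint8_t tmp = *((uint8_t *)&len + 3);

            printf("byte 3 = 0x%02x, len & 0xFF = 0x%02x\n",
                   tmp, len & 0xFF);
            return tmp == (len & 0xFF) ? 0 : 2;
    }

On s390 this returns 0; the selftest picks the byte offset per endianness so that the equality holds everywhere.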
clang generates the following code for it:
0: 71 21 00 03 00 00 00 00 r2 = *(u8 *)(r1 + 3)
1: 61 31 00 00 00 00 00 00 r3 = *(u32 *)(r1 + 0)
2: 57 30 00 00 00 00 00 ff r3 &= 255
3: 5d 23 00 1d 00 00 00 00 if r2 != r3 goto +29 <LBB0_10>
Finally, the verifier transforms it to:
0: (61) r2 = *(u32 *)(r1 +104)
1: (bc) w2 = w2
2: (74) w2 >>= 24
3: (bc) w2 = w2
4: (54) w2 &= 255
5: (bc) w2 = w2
The problem is that when the verifier emits the code to replace a narrow
load of a struct __sk_buff field (*(u8 *)(r1 + 3)) with a full load of
the corresponding struct sk_buff field (*(u32 *)(r1 + 104)), an optional
shift and a bitwise AND, it assumes a little-endian machine and picks
the shift count accordingly. On big-endian s390, however, the byte at
offset 3 of a 32-bit field is the least significant byte, so no shift is
needed at all; the sequence above instead shifts right by 24 and
extracts the most significant byte.
Adjust the shift count calculation to account for endianness.
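A minimal sketch of the corrected computation (it mirrors the
bpf_ctx_narrow_load_shift() helper added by this patch; the standalone
name and the __BYTE_ORDER__ test are illustration only, the kernel
version uses __LITTLE_ENDIAN from asm/byteorder.h):

    #include <stdint.h>

    /* Bit shift applied after the widened load, per endianness. */
    static inline uint8_t narrow_load_shift(uint32_t off, uint32_t size,
                                            uint32_t size_default)
    {
            uint8_t load_off = off & (size_default - 1);

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            return load_off * 8;
    #else
            return (size_default - (load_off + size)) * 8;
    #endif
    }

For the failing check (off % 4 == 3, size == 1, size_default == 4) this
yields 3 * 8 = 24 on little-endian but (4 - (3 + 1)) * 8 = 0 on
big-endian; with a zero shift the BPF_RSH instruction is skipped
entirely.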
Fixes: 31fd85816d ("bpf: permits narrower load from bpf program context fields")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -24,6 +24,7 @@
 
 #include <net/sch_generic.h>
 
+#include <asm/byteorder.h>
 #include <uapi/linux/filter.h>
 #include <uapi/linux/bpf.h>
 
@@ -747,6 +748,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 	return size <= size_default && (size & (size - 1)) == 0;
 }
 
+static inline u8
+bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default)
+{
+	u8 load_off = off & (size_default - 1);
+
+#ifdef __LITTLE_ENDIAN
+	return load_off * 8;
+#else
+	return (size_default - (load_off + size)) * 8;
+#endif
+}
+
 #define bpf_ctx_wide_access_ok(off, size, type, field)		\
 	(size == sizeof(__u64) &&					\
 	 off >= offsetof(type, field) &&				\
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8616,8 +8616,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		}
 
 		if (is_narrower_load && size < target_size) {
-			u8 shift = (off & (size_default - 1)) * 8;
-
+			u8 shift = bpf_ctx_narrow_load_shift(off, size,
+							     size_default);
 			if (ctx_field_size <= 4) {
 				if (shift)
 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
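To double-check the arithmetic outside the kernel, a small user-space
property test (illustration only; __BYTE_ORDER__ is the gcc/clang
predefined macro): for every byte offset within a 32-bit word, the
widened load shifted by the computed amount and masked must agree with
a direct one-byte load, on either endianness.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t word = 0x11223344;
            uint32_t off;

            for (off = 0; off < 4; off++) {
                    /* Same formulas as bpf_ctx_narrow_load_shift()
                     * for size == 1, size_default == 4.
                     */
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                    uint32_t shift = off * 8;
    #else
                    uint32_t shift = (4 - (off + 1)) * 8;
    #endif
                    assert(((word >> shift) & 0xFF) ==
                           *((uint8_t *)&word + off));
            }
            return 0;
    }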