samples/bpf: extend test_cgrp2_attach2 test to use per-cpu cgroup storage
This commit extends the test_cgrp2_attach2 test to cover per-cpu cgroup storage. The BPF program will use shared and per-cpu cgroup storages simultaneously, so better coverage of the corresponding core code will be achieved.

Expected output:
  $ ./test_cgrp2_attach2
  Attached DROP prog. This ping in cgroup /foo should fail...
  ping: sendmsg: Operation not permitted
  Attached DROP prog. This ping in cgroup /foo/bar should fail...
  ping: sendmsg: Operation not permitted
  Attached PASS prog. This ping in cgroup /foo/bar should pass...
  Detached PASS from /foo/bar while DROP is attached to /foo.
  This ping in cgroup /foo/bar should fail...
  ping: sendmsg: Operation not permitted
  Attached PASS from /foo/bar and detached DROP from /foo.
  This ping in cgroup /foo/bar should pass...
  ### override:PASS
  ### multi:PASS

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
parent
919646d2a3
commit
5fcbd29b37
|
@@ -209,7 +209,7 @@ static int map_fd = -1;
|
||||||
|
|
||||||
static int prog_load_cnt(int verdict, int val)
|
static int prog_load_cnt(int verdict, int val)
|
||||||
{
|
{
|
||||||
int cgroup_storage_fd;
|
int cgroup_storage_fd, percpu_cgroup_storage_fd;
|
||||||
|
|
||||||
if (map_fd < 0)
|
if (map_fd < 0)
|
||||||
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
|
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
|
||||||
|
@@ -225,6 +225,14 @@ static int prog_load_cnt(int verdict, int val)
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
percpu_cgroup_storage_fd = bpf_create_map(
|
||||||
|
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
|
||||||
|
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
|
||||||
|
if (percpu_cgroup_storage_fd < 0) {
|
||||||
|
printf("failed to create map '%s'\n", strerror(errno));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
struct bpf_insn prog[] = {
|
struct bpf_insn prog[] = {
|
||||||
BPF_MOV32_IMM(BPF_REG_0, 0),
|
BPF_MOV32_IMM(BPF_REG_0, 0),
|
||||||
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
|
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
|
||||||
|
@@ -235,11 +243,20 @@ static int prog_load_cnt(int verdict, int val)
|
||||||
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
|
||||||
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
|
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
|
||||||
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
|
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
|
||||||
|
|
||||||
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
|
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
|
||||||
BPF_MOV64_IMM(BPF_REG_2, 0),
|
BPF_MOV64_IMM(BPF_REG_2, 0),
|
||||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
|
||||||
BPF_MOV64_IMM(BPF_REG_1, val),
|
BPF_MOV64_IMM(BPF_REG_1, val),
|
||||||
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
|
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
|
||||||
|
|
||||||
|
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_2, 0),
|
||||||
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
|
||||||
|
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
|
||||||
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
|
||||||
|
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
|
||||||
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
|
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
|
||||||
BPF_EXIT_INSN(),
|
BPF_EXIT_INSN(),
|
||||||
};
|
};
|
||||||
|
|
Loading…
Reference in New Issue