Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2020-05-22

The following pull-request contains BPF updates for your *net* tree.

We've added 3 non-merge commits during the last 3 day(s) which contain
a total of 5 files changed, 69 insertions(+), 11 deletions(-).

The main changes are:

1) Reject mmap()'ing read-only array maps as writable, since the BPF
   verifier relies on such map contents staying frozen, from Andrii
   Nakryiko.

2) Fix audit breakage caused by the secid_to_secctx() LSM hook: avoid
   call_int_hook(), since this hook is not stackable, from KP Singh.

3) Fix a BPF flow dissector program reference leak on netns cleanup,
   from Jakub Sitnicki.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d3b968bc2d
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -623,9 +623,20 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	mutex_lock(&map->freeze_mutex);
 
-	if ((vma->vm_flags & VM_WRITE) && map->frozen) {
-		err = -EPERM;
-		goto out;
+	if (vma->vm_flags & VM_WRITE) {
+		if (map->frozen) {
+			err = -EPERM;
+			goto out;
+		}
+		/* map is meant to be read-only, so do not allow mapping as
+		 * writable, because it's possible to leak a writable page
+		 * reference and allows user-space to still modify it after
+		 * freezing, while verifier will assume contents do not change
+		 */
+		if (map->map_flags & BPF_F_RDONLY_PROG) {
+			err = -EACCES;
+			goto out;
+		}
 	}
 
 	/* set default open/close callbacks */
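To make the userspace-visible effect of this hunk concrete, here is a minimal
stand-alone sketch (not part of the patch; the raw bpf(2) syscall setup and
sizes are illustrative assumptions, and map creation typically requires
root/CAP_BPF): a BPF_F_MMAPABLE | BPF_F_RDONLY_PROG array map still accepts a
read-only mapping, but a writable one now fails with EACCES.

/* Illustrative only: demonstrates the EACCES now returned by
 * bpf_map_mmap() for writable mappings of BPF_F_RDONLY_PROG maps.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;
	void *p;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(__u32);
	attr.value_size = 4096;		/* one page of value data */
	attr.max_entries = 1;
	attr.map_flags = BPF_F_MMAPABLE | BPF_F_RDONLY_PROG;

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* Writable mapping: rejected with EACCES after this fix. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("rw mmap: %s\n", p == MAP_FAILED ? strerror(errno) : "ok (bug!)");

	/* Read-only mapping: still allowed. */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	printf("ro mmap: %s\n", p == MAP_FAILED ? strerror(errno) : "ok");
	return 0;
}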
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -160,12 +160,10 @@ out:
 	return ret;
 }
 
-int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+static int flow_dissector_bpf_prog_detach(struct net *net)
 {
 	struct bpf_prog *attached;
-	struct net *net;
 
-	net = current->nsproxy->net_ns;
 	mutex_lock(&flow_dissector_mutex);
 	attached = rcu_dereference_protected(net->flow_dissector_prog,
 					     lockdep_is_held(&flow_dissector_mutex));
@@ -179,6 +177,24 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
 	return 0;
 }
 
+int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+{
+	return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns);
+}
+
+static void __net_exit flow_dissector_pernet_pre_exit(struct net *net)
+{
+	/* We're not racing with attach/detach because there are no
+	 * references to netns left when pre_exit gets called.
+	 */
+	if (rcu_access_pointer(net->flow_dissector_prog))
+		flow_dissector_bpf_prog_detach(net);
+}
+
+static struct pernet_operations flow_dissector_pernet_ops __net_initdata = {
+	.pre_exit = flow_dissector_pernet_pre_exit,
+};
+
 /**
  * __skb_flow_get_ports - extract the upper layer ports and return them
  * @skb: sk_buff to extract the ports from
@ -1836,7 +1852,7 @@ static int __init init_default_flow_dissectors(void)
 	skb_flow_dissector_init(&flow_keys_basic_dissector,
 				flow_keys_basic_dissector_keys,
 				ARRAY_SIZE(flow_keys_basic_dissector_keys));
-	return 0;
+
+	return register_pernet_subsys(&flow_dissector_pernet_ops);
 }
 core_initcall(init_default_flow_dissectors);
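For context, the leak fixed here could be hit by a sequence like the following
hypothetical sketch (illustrative, not from the patch; assumes prog_fd refers
to a loaded BPF_PROG_TYPE_FLOW_DISSECTOR program and that the caller has
CAP_NET_ADMIN):

/* Hypothetical reproducer outline for the ref leak: attach a flow
 * dissector program inside a private netns, then let the netns die
 * without detaching. Before this patch the prog reference was never
 * dropped; flow_dissector_pernet_pre_exit() now detaches it.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <bpf/bpf.h>	/* libbpf's bpf_prog_attach() */

void leak_scenario(int prog_fd)
{
	/* Move into a fresh network namespace. */
	if (unshare(CLONE_NEWNET))
		return;

	/* Attach the flow dissector prog to this netns; the target fd
	 * is unused for BPF_FLOW_DISSECTOR, so pass 0.
	 */
	bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);

	/* Returning without bpf_prog_detach() leaves the prog attached
	 * when the netns is destroyed -- this reference previously leaked.
	 */
}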
diff --git a/security/security.c b/security/security.c
--- a/security/security.c
+++ b/security/security.c
@@ -1965,8 +1965,20 @@ EXPORT_SYMBOL(security_ismaclabel);
 
 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 {
-	return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata,
-			    seclen);
+	struct security_hook_list *hp;
+	int rc;
+
+	/*
+	 * Currently, only one LSM can implement secid_to_secctx (i.e this
+	 * LSM hook is not "stackable").
+	 */
+	hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) {
+		rc = hp->hook.secid_to_secctx(secid, secdata, seclen);
+		if (rc != LSM_RET_DEFAULT(secid_to_secctx))
+			return rc;
+	}
+
+	return LSM_RET_DEFAULT(secid_to_secctx);
 }
 EXPORT_SYMBOL(security_secid_to_secctx);
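Why call_int_hook() misbehaves here: the macro (paraphrased below from
security/security.c, slightly simplified) walks the hook list and bails out on
the first non-zero return. With CONFIG_BPF_LSM, the hook list also contains a
BPF LSM default entry returning LSM_RET_DEFAULT(secid_to_secctx), i.e.
-EOPNOTSUPP, so even after another LSM such as SELinux fills the secctx and
returns 0, the walk continues into the default entry and the success gets
clobbered with -EOPNOTSUPP, which is what broke audit. The replacement above
instead skips any hook that returns the default value.

/* Paraphrase of call_int_hook() (simplified; see security/security.c
 * for the real macro): stops at the first non-zero return, which lets
 * a later default (-EOPNOTSUPP) entry overwrite an earlier rc == 0.
 */
#define call_int_hook(FUNC, IRC, ...) ({				\
	int RC = IRC;							\
	struct security_hook_list *P;					\
									\
	hlist_for_each_entry(P, &security_hook_heads.FUNC, list) {	\
		RC = P->hook.FUNC(__VA_ARGS__);				\
		if (RC != 0)						\
			break;						\
	}								\
	RC;								\
})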
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -19,7 +19,7 @@ void test_mmap(void)
 	const size_t map_sz = roundup_page(sizeof(struct map_data));
 	const int zero = 0, one = 1, two = 2, far = 1500;
 	const long page_size = sysconf(_SC_PAGE_SIZE);
-	int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd;
+	int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
 	struct bpf_map *data_map, *bss_map;
 	void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
 	struct test_mmap__bss *bss_data;
@@ -37,6 +37,17 @@ void test_mmap(void)
 	data_map = skel->maps.data_map;
 	data_map_fd = bpf_map__fd(data_map);
 
+	rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
+	tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+	if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
+		munmap(tmp1, 4096);
+		goto cleanup;
+	}
+	/* now double-check if it's mmap()'able at all */
+	tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+	if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
+		goto cleanup;
+
 	/* get map's ID */
 	memset(&map_info, 0, map_info_sz);
 	err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
--- a/tools/testing/selftests/bpf/progs/test_mmap.c
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -7,6 +7,14 @@
 
 char _license[] SEC("license") = "GPL";
 
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 4096);
+	__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
+	__type(key, __u32);
+	__type(value, char);
+} rdonly_map SEC(".maps");
+
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
 	__uint(max_entries, 512 * 4); /* at least 4 pages of data */
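As a usage note, a hypothetical BPF-side snippet (not in the patch; assumes the
same file's existing includes, i.e. <linux/bpf.h> and <bpf/bpf_helpers.h>)
reading the new rdonly_map. BPF_F_RDONLY_PROG means the verifier permits
lookups but rejects any store through the returned value pointer:

/* Hypothetical reader for rdonly_map; illustrative only. */
SEC("raw_tracepoint/sys_enter")
int rdonly_reader(void *ctx)
{
	__u32 key = 0;
	char *val;

	val = bpf_map_lookup_elem(&rdonly_map, &key);
	if (val)
		bpf_printk("rdonly_map[0] = %d", *val);
	/* A store such as `*val = 1;` would be rejected by the verifier,
	 * since the map was created with BPF_F_RDONLY_PROG.
	 */
	return 0;
}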