Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2020-06-30

The following pull-request contains BPF updates for your *net* tree.

We've added 28 non-merge commits during the last 9 day(s) which contain
a total of 35 files changed, 486 insertions(+), 232 deletions(-).

The main changes are:

1) Fix an incorrect verifier branch elimination for PTR_TO_BTF_ID pointer
   types, from Yonghong Song.

2) Fix UAPI for sockmap and flow_dissector progs that were ignoring various
   arguments passed to BPF_PROG_{ATTACH,DETACH}, from Lorenz Bauer & Jakub
   Sitnicki.

3) Fix broken AF_XDP DMA hacks that are poking into dma-direct and swiotlb
   internals and integrate it properly into DMA core, from Christoph Hellwig.

4) Fix RCU splat from recent changes to avoid skipping ingress policy when
   kTLS is enabled, from John Fastabend.

5) Fix BPF ringbuf map to enforce size to be the power of 2 in order for its
   position masking to work, from Andrii Nakryiko.

6) Fix regression from CAP_BPF work to re-allow CAP_SYS_ADMIN for loading of
   network programs, from Maciej Żenczykowski.

7) Fix libbpf section name prefix for devmap progs, from Jesper Dangaard Brouer.

8) Fix formatting in UAPI documentation for BPF helpers, from Quentin Monnet.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -204,6 +204,14 @@ Returns the maximum size of a mapping for the device. The size parameter
of the mapping functions like dma_map_single(), dma_map_page() and
others should not be larger than the returned value.

::

	bool
	dma_need_sync(struct device *dev, dma_addr_t dma_addr);

Returns %true if dma_sync_single_for_{device,cpu} calls are required to
transfer memory ownership. Returns %false if those calls can be skipped.

::

	unsigned long
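The dma_need_sync() interface documented above is what the rest of this series builds on: a caller can ask once whether a mapping needs explicit ownership transfers and skip the sync calls otherwise. As a rough sketch (not taken from this patch set; the device, page and direction are placeholder assumptions), a caller might cache the answer at map time:

	#include <linux/dma-mapping.h>

	/* Map one RX page and remember whether later syncs are needed for it. */
	static dma_addr_t map_rx_page(struct device *dev, struct page *page,
				      bool *need_sync)
	{
		dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
						    DMA_BIDIRECTIONAL, 0);

		if (dma_mapping_error(dev, dma))
			return DMA_MAPPING_ERROR;

		*need_sync = dma_need_sync(dev, dma);	/* new helper from this series */
		return dma;
	}

	/* Before the CPU reads the buffer, sync only when the device needs it. */
	static void rx_page_for_cpu(struct device *dev, dma_addr_t dma, bool need_sync)
	{
		if (need_sync)
			dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

This mirrors what the AF_XDP buffer pool does further down in this series, where xp_dma_map() records a single dma_need_sync flag for the whole pool.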
@@ -33,7 +33,7 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
			 union bpf_attr __user *uattr);
int netns_bpf_prog_attach(const union bpf_attr *attr,
			  struct bpf_prog *prog);
int netns_bpf_prog_detach(const union bpf_attr *attr);
int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int netns_bpf_link_create(const union bpf_attr *attr,
			  struct bpf_prog *prog);
#else
@@ -49,7 +49,8 @@ static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
	return -EOPNOTSUPP;
}

static inline int netns_bpf_prog_detach(const union bpf_attr *attr)
static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
					enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}
@ -1543,13 +1543,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
|
|||
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
|
||||
|
||||
#if defined(CONFIG_BPF_STREAM_PARSER)
|
||||
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
|
||||
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
|
||||
struct bpf_prog *old, u32 which);
|
||||
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
|
||||
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
|
||||
void sock_map_unhash(struct sock *sk);
|
||||
void sock_map_close(struct sock *sk, long timeout);
|
||||
#else
|
||||
static inline int sock_map_prog_update(struct bpf_map *map,
|
||||
struct bpf_prog *prog, u32 which)
|
||||
struct bpf_prog *prog,
|
||||
struct bpf_prog *old, u32 which)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -1559,6 +1562,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
|
|||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int sock_map_prog_detach(const union bpf_attr *attr,
|
||||
enum bpf_prog_type ptype)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif /* CONFIG_BPF_STREAM_PARSER */
|
||||
|
||||
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
|
||||
|
|
|
@@ -82,6 +82,11 @@ static inline bool btf_type_is_int(const struct btf_type *t)
	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}

static inline bool btf_type_is_small_int(const struct btf_type *t)
{
	return btf_type_is_int(t) && t->size <= sizeof(u64);
}

static inline bool btf_type_is_enum(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
@@ -87,4 +87,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask);
|
|||
int dma_set_coherent_mask(struct device *dev, u64 mask);
|
||||
u64 dma_get_required_mask(struct device *dev);
|
||||
size_t dma_max_mapping_size(struct device *dev);
|
||||
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
|
||||
unsigned long dma_get_merge_boundary(struct device *dev);
|
||||
#else /* CONFIG_HAS_DMA */
|
||||
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
|
||||
|
@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
|
|||
{
|
||||
return 0;
|
||||
}
|
||||
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline unsigned long dma_get_merge_boundary(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
|
|
|
@ -430,6 +430,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
|
|||
bpf_prog_put(prog);
|
||||
}
|
||||
|
||||
static inline int psock_replace_prog(struct bpf_prog **pprog,
|
||||
struct bpf_prog *prog,
|
||||
struct bpf_prog *old)
|
||||
{
|
||||
if (cmpxchg(pprog, old, prog) != old)
|
||||
return -ENOENT;
|
||||
|
||||
if (old)
|
||||
bpf_prog_put(old);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void psock_progs_drop(struct sk_psock_progs *progs)
|
||||
{
|
||||
psock_set_prog(&progs->msg_parser, NULL);
|
||||
|
|
|
@ -372,7 +372,8 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
|
|||
}
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog);
|
||||
int flow_dissector_bpf_prog_attach_check(struct net *net,
|
||||
struct bpf_prog *prog);
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
||||
#endif
|
||||
|
|
|
@ -9,10 +9,13 @@
|
|||
#include <linux/bpf-netns.h>
|
||||
|
||||
struct bpf_prog;
|
||||
struct bpf_prog_array;
|
||||
|
||||
struct netns_bpf {
|
||||
struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE];
|
||||
struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE];
|
||||
/* Array of programs to run compiled from progs or links */
|
||||
struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
|
||||
struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
|
||||
struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
|
||||
};
|
||||
|
||||
#endif /* __NETNS_BPF_H__ */
|
||||
|
|
|
@ -40,7 +40,7 @@ struct xsk_buff_pool {
|
|||
u32 headroom;
|
||||
u32 chunk_size;
|
||||
u32 frame_len;
|
||||
bool cheap_dma;
|
||||
bool dma_need_sync;
|
||||
bool unaligned;
|
||||
void *addrs;
|
||||
struct device *dev;
|
||||
|
@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
|
|||
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
|
||||
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
|
||||
{
|
||||
if (xskb->pool->cheap_dma)
|
||||
if (!xskb->pool->dma_need_sync)
|
||||
return;
|
||||
|
||||
xp_dma_sync_for_cpu_slow(xskb);
|
||||
|
@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
|
|||
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
|
||||
dma_addr_t dma, size_t size)
|
||||
{
|
||||
if (pool->cheap_dma)
|
||||
if (!pool->dma_need_sync)
|
||||
return;
|
||||
|
||||
xp_dma_sync_for_device_slow(pool, dma, size);
|
||||
|
|
|
@@ -3171,13 +3171,12 @@ union bpf_attr {
 * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
 *	Description
 *		Copy *size* bytes from *data* into a ring buffer *ringbuf*.
 *		If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
 *		new data availability is sent.
 *		IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
 *		new data availability is sent unconditionally.
 *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 *		of new data availability is sent.
 *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 *		of new data availability is sent unconditionally.
 *	Return
 *		0, on success;
 *		< 0, on error.
 *		0 on success, or a negative error in case of failure.
 *
 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
 *	Description
@@ -3189,20 +3188,20 @@ union bpf_attr {
 * void bpf_ringbuf_submit(void *data, u64 flags)
 *	Description
 *		Submit reserved ring buffer sample, pointed to by *data*.
 *		If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
 *		new data availability is sent.
 *		IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
 *		new data availability is sent unconditionally.
 *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 *		of new data availability is sent.
 *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 *		of new data availability is sent unconditionally.
 *	Return
 *		Nothing. Always succeeds.
 *
 * void bpf_ringbuf_discard(void *data, u64 flags)
 *	Description
 *		Discard reserved ring buffer sample, pointed to by *data*.
 *		If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
 *		new data availability is sent.
 *		IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
 *		new data availability is sent unconditionally.
 *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 *		of new data availability is sent.
 *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 *		of new data availability is sent unconditionally.
 *	Return
 *		Nothing. Always succeeds.
 *
@@ -3210,16 +3209,18 @@ union bpf_attr {
 *	Description
 *		Query various characteristics of provided ring buffer. What
 *		exactly is queries is determined by *flags*:
 *		  - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
 *		  - BPF_RB_RING_SIZE - the size of ring buffer;
 *		  - BPF_RB_CONS_POS - consumer position (can wrap around);
 *		  - BPF_RB_PROD_POS - producer(s) position (can wrap around);
 *		Data returned is just a momentary snapshots of actual values
 *
 *		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
 *		* **BPF_RB_RING_SIZE**: The size of ring buffer.
 *		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
 *		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
 *
 *		Data returned is just a momentary snapshot of actual values
 *		and could be inaccurate, so this facility should be used to
 *		power heuristics and for reporting, not to make 100% correct
 *		calculation.
 *	Return
 *		Requested value, or 0, if flags are not recognized.
 *		Requested value, or 0, if *flags* are not recognized.
 *
 * int bpf_csum_level(struct sk_buff *skb, u64 level)
 *	Description
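The ring buffer helpers documented above are called from BPF program code. A minimal, hedged sketch of the reserve/submit pattern with the wakeup flags (the map name, its size, and the event layout are illustrative assumptions, not part of this patch set):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* max_entries must be a multiple of PAGE_SIZE and a power of 2
	 * (see the ringbuf map fix later in this series).
	 */
	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 256 * 1024);
	} events SEC(".maps");

	struct event {
		__u32 pid;
	};

	SEC("tracepoint/syscalls/sys_enter_execve")
	int trace_execve(void *ctx)
	{
		struct event *e;

		e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
		if (!e)
			return 0;

		e->pid = bpf_get_current_pid_tgid() >> 32;
		/* BPF_RB_FORCE_WAKEUP notifies the consumer unconditionally;
		 * BPF_RB_NO_WAKEUP would suppress the notification.
		 */
		bpf_ringbuf_submit(e, BPF_RB_FORCE_WAKEUP);
		return 0;
	}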
@@ -3746,7 +3746,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		return false;

	t = btf_type_skip_modifiers(btf, t->type, NULL);
	if (!btf_type_is_int(t)) {
	if (!btf_type_is_small_int(t)) {
		bpf_log(log,
			"ret type %s not allowed for fmod_ret\n",
			btf_kind_str[BTF_INFO_KIND(t->info)]);
@@ -3768,7 +3768,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
	/* skip modifiers */
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (btf_type_is_int(t) || btf_type_is_enum(t))
	if (btf_type_is_small_int(t) || btf_type_is_enum(t))
		/* accessing a scalar */
		return true;
	if (!btf_type_is_ptr(t)) {
@ -19,18 +19,21 @@ struct bpf_netns_link {
|
|||
* with netns_bpf_mutex held.
|
||||
*/
|
||||
struct net *net;
|
||||
struct list_head node; /* node in list of links attached to net */
|
||||
};
|
||||
|
||||
/* Protects updates to netns_bpf */
|
||||
DEFINE_MUTEX(netns_bpf_mutex);
|
||||
|
||||
/* Must be called with netns_bpf_mutex held. */
|
||||
static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link)
|
||||
static void netns_bpf_run_array_detach(struct net *net,
|
||||
enum netns_bpf_attach_type type)
|
||||
{
|
||||
struct bpf_netns_link *net_link =
|
||||
container_of(link, struct bpf_netns_link, link);
|
||||
struct bpf_prog_array *run_array;
|
||||
|
||||
net_link->net = NULL;
|
||||
run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
bpf_prog_array_free(run_array);
|
||||
}
|
||||
|
||||
static void bpf_netns_link_release(struct bpf_link *link)
|
||||
|
@ -40,22 +43,18 @@ static void bpf_netns_link_release(struct bpf_link *link)
|
|||
enum netns_bpf_attach_type type = net_link->netns_type;
|
||||
struct net *net;
|
||||
|
||||
/* Link auto-detached by dying netns. */
|
||||
if (!net_link->net)
|
||||
return;
|
||||
|
||||
mutex_lock(&netns_bpf_mutex);
|
||||
|
||||
/* Recheck after potential sleep. We can race with cleanup_net
|
||||
* here, but if we see a non-NULL struct net pointer pre_exit
|
||||
* has not happened yet and will block on netns_bpf_mutex.
|
||||
/* We can race with cleanup_net, but if we see a non-NULL
|
||||
* struct net pointer, pre_exit has not run yet and wait for
|
||||
* netns_bpf_mutex.
|
||||
*/
|
||||
net = net_link->net;
|
||||
if (!net)
|
||||
goto out_unlock;
|
||||
|
||||
net->bpf.links[type] = NULL;
|
||||
RCU_INIT_POINTER(net->bpf.progs[type], NULL);
|
||||
netns_bpf_run_array_detach(net, type);
|
||||
list_del(&net_link->node);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&netns_bpf_mutex);
|
||||
|
@ -76,6 +75,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
|
|||
struct bpf_netns_link *net_link =
|
||||
container_of(link, struct bpf_netns_link, link);
|
||||
enum netns_bpf_attach_type type = net_link->netns_type;
|
||||
struct bpf_prog_array *run_array;
|
||||
struct net *net;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -93,8 +93,11 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
|
|||
goto out_unlock;
|
||||
}
|
||||
|
||||
run_array = rcu_dereference_protected(net->bpf.run_array[type],
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
WRITE_ONCE(run_array->items[0].prog, new_prog);
|
||||
|
||||
old_prog = xchg(&link->prog, new_prog);
|
||||
rcu_assign_pointer(net->bpf.progs[type], new_prog);
|
||||
bpf_prog_put(old_prog);
|
||||
|
||||
out_unlock:
|
||||
|
@ -142,14 +145,38 @@ static const struct bpf_link_ops bpf_netns_link_ops = {
|
|||
.show_fdinfo = bpf_netns_link_show_fdinfo,
|
||||
};
|
||||
|
||||
/* Must be called with netns_bpf_mutex held. */
|
||||
static int __netns_bpf_prog_query(const union bpf_attr *attr,
|
||||
union bpf_attr __user *uattr,
|
||||
struct net *net,
|
||||
enum netns_bpf_attach_type type)
|
||||
{
|
||||
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
|
||||
struct bpf_prog_array *run_array;
|
||||
u32 prog_cnt = 0, flags = 0;
|
||||
|
||||
run_array = rcu_dereference_protected(net->bpf.run_array[type],
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
if (run_array)
|
||||
prog_cnt = bpf_prog_array_length(run_array);
|
||||
|
||||
if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
|
||||
return -EFAULT;
|
||||
if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
|
||||
return -EFAULT;
|
||||
if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
|
||||
return 0;
|
||||
|
||||
return bpf_prog_array_copy_to_user(run_array, prog_ids,
|
||||
attr->query.prog_cnt);
|
||||
}
|
||||
|
||||
int netns_bpf_prog_query(const union bpf_attr *attr,
|
||||
union bpf_attr __user *uattr)
|
||||
{
|
||||
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
|
||||
u32 prog_id, prog_cnt = 0, flags = 0;
|
||||
enum netns_bpf_attach_type type;
|
||||
struct bpf_prog *attached;
|
||||
struct net *net;
|
||||
int ret;
|
||||
|
||||
if (attr->query.query_flags)
|
||||
return -EINVAL;
|
||||
|
@ -162,36 +189,25 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
|
|||
if (IS_ERR(net))
|
||||
return PTR_ERR(net);
|
||||
|
||||
rcu_read_lock();
|
||||
attached = rcu_dereference(net->bpf.progs[type]);
|
||||
if (attached) {
|
||||
prog_cnt = 1;
|
||||
prog_id = attached->aux->id;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mutex_lock(&netns_bpf_mutex);
|
||||
ret = __netns_bpf_prog_query(attr, uattr, net, type);
|
||||
mutex_unlock(&netns_bpf_mutex);
|
||||
|
||||
put_net(net);
|
||||
|
||||
if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
|
||||
return -EFAULT;
|
||||
if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
|
||||
return 0;
|
||||
|
||||
if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_prog_array *run_array;
|
||||
enum netns_bpf_attach_type type;
|
||||
struct bpf_prog *attached;
|
||||
struct net *net;
|
||||
int ret;
|
||||
|
||||
if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
|
||||
return -EINVAL;
|
||||
|
||||
type = to_netns_bpf_attach_type(attr->attach_type);
|
||||
if (type < 0)
|
||||
return -EINVAL;
|
||||
|
@ -200,19 +216,47 @@ int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
|
|||
mutex_lock(&netns_bpf_mutex);
|
||||
|
||||
/* Attaching prog directly is not compatible with links */
|
||||
if (net->bpf.links[type]) {
|
||||
if (!list_empty(&net->bpf.links[type])) {
|
||||
ret = -EEXIST;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case NETNS_BPF_FLOW_DISSECTOR:
|
||||
ret = flow_dissector_bpf_prog_attach(net, prog);
|
||||
ret = flow_dissector_bpf_prog_attach_check(net, prog);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
attached = net->bpf.progs[type];
|
||||
if (attached == prog) {
|
||||
/* The same program cannot be attached twice */
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
run_array = rcu_dereference_protected(net->bpf.run_array[type],
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
if (run_array) {
|
||||
WRITE_ONCE(run_array->items[0].prog, prog);
|
||||
} else {
|
||||
run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
|
||||
if (!run_array) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
run_array->items[0].prog = prog;
|
||||
rcu_assign_pointer(net->bpf.run_array[type], run_array);
|
||||
}
|
||||
|
||||
net->bpf.progs[type] = prog;
|
||||
if (attached)
|
||||
bpf_prog_put(attached);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&netns_bpf_mutex);
|
||||
|
||||
|
@ -221,63 +265,74 @@ out_unlock:
|
|||
|
||||
/* Must be called with netns_bpf_mutex held. */
|
||||
static int __netns_bpf_prog_detach(struct net *net,
|
||||
enum netns_bpf_attach_type type)
|
||||
enum netns_bpf_attach_type type,
|
||||
struct bpf_prog *old)
|
||||
{
|
||||
struct bpf_prog *attached;
|
||||
|
||||
/* Progs attached via links cannot be detached */
|
||||
if (net->bpf.links[type])
|
||||
if (!list_empty(&net->bpf.links[type]))
|
||||
return -EINVAL;
|
||||
|
||||
attached = rcu_dereference_protected(net->bpf.progs[type],
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
if (!attached)
|
||||
attached = net->bpf.progs[type];
|
||||
if (!attached || attached != old)
|
||||
return -ENOENT;
|
||||
RCU_INIT_POINTER(net->bpf.progs[type], NULL);
|
||||
netns_bpf_run_array_detach(net, type);
|
||||
net->bpf.progs[type] = NULL;
|
||||
bpf_prog_put(attached);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int netns_bpf_prog_detach(const union bpf_attr *attr)
|
||||
int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
|
||||
{
|
||||
enum netns_bpf_attach_type type;
|
||||
struct bpf_prog *prog;
|
||||
int ret;
|
||||
|
||||
if (attr->target_fd)
|
||||
return -EINVAL;
|
||||
|
||||
type = to_netns_bpf_attach_type(attr->attach_type);
|
||||
if (type < 0)
|
||||
return -EINVAL;
|
||||
|
||||
prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
|
||||
mutex_lock(&netns_bpf_mutex);
|
||||
ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type);
|
||||
ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
|
||||
mutex_unlock(&netns_bpf_mutex);
|
||||
|
||||
bpf_prog_put(prog);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
|
||||
enum netns_bpf_attach_type type)
|
||||
{
|
||||
struct bpf_prog *prog;
|
||||
struct bpf_netns_link *net_link =
|
||||
container_of(link, struct bpf_netns_link, link);
|
||||
struct bpf_prog_array *run_array;
|
||||
int err;
|
||||
|
||||
mutex_lock(&netns_bpf_mutex);
|
||||
|
||||
/* Allow attaching only one prog or link for now */
|
||||
if (net->bpf.links[type]) {
|
||||
if (!list_empty(&net->bpf.links[type])) {
|
||||
err = -E2BIG;
|
||||
goto out_unlock;
|
||||
}
|
||||
/* Links are not compatible with attaching prog directly */
|
||||
prog = rcu_dereference_protected(net->bpf.progs[type],
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
if (prog) {
|
||||
if (net->bpf.progs[type]) {
|
||||
err = -EEXIST;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case NETNS_BPF_FLOW_DISSECTOR:
|
||||
err = flow_dissector_bpf_prog_attach(net, link->prog);
|
||||
err = flow_dissector_bpf_prog_attach_check(net, link->prog);
|
||||
break;
|
||||
default:
|
||||
err = -EINVAL;
|
||||
|
@ -286,7 +341,15 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
|
|||
if (err)
|
||||
goto out_unlock;
|
||||
|
||||
net->bpf.links[type] = link;
|
||||
run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
|
||||
if (!run_array) {
|
||||
err = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
run_array->items[0].prog = link->prog;
|
||||
rcu_assign_pointer(net->bpf.run_array[type], run_array);
|
||||
|
||||
list_add_tail(&net_link->node, &net->bpf.links[type]);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&netns_bpf_mutex);
|
||||
|
@ -345,23 +408,34 @@ out_put_net:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int __net_init netns_bpf_pernet_init(struct net *net)
|
||||
{
|
||||
int type;
|
||||
|
||||
for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
|
||||
INIT_LIST_HEAD(&net->bpf.links[type]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
|
||||
{
|
||||
enum netns_bpf_attach_type type;
|
||||
struct bpf_link *link;
|
||||
struct bpf_netns_link *net_link;
|
||||
|
||||
mutex_lock(&netns_bpf_mutex);
|
||||
for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
|
||||
link = net->bpf.links[type];
|
||||
if (link)
|
||||
bpf_netns_link_auto_detach(link);
|
||||
else
|
||||
__netns_bpf_prog_detach(net, type);
|
||||
netns_bpf_run_array_detach(net, type);
|
||||
list_for_each_entry(net_link, &net->bpf.links[type], node)
|
||||
net_link->net = NULL; /* auto-detach link */
|
||||
if (net->bpf.progs[type])
|
||||
bpf_prog_put(net->bpf.progs[type]);
|
||||
}
|
||||
mutex_unlock(&netns_bpf_mutex);
|
||||
}
|
||||
|
||||
static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
|
||||
.init = netns_bpf_pernet_init,
|
||||
.pre_exit = netns_bpf_pernet_pre_exit,
|
||||
};
|
||||
|
||||
|
|
|
@@ -132,15 +132,6 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	if (!data_sz || !PAGE_ALIGNED(data_sz))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
	if (data_sz > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return ERR_PTR(-ENOMEM);
@@ -166,9 +157,16 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries))
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);
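For context on the is_power_of_2() check added above: the ring buffer derives record offsets from its logical producer/consumer positions by masking, and masking is only equivalent to the intended modulo when the size is a power of two. A tiny illustration (the macro name is made up):

	/* pos & (size - 1) == pos % size only when size is a power of 2:
	 *   5000 & (4096 - 1) == 904  and 5000 % 4096 == 904   (OK)
	 *   5000 & (5000 - 1) == 4992 but 5000 % 5000 == 0      (broken)
	 */
	#define RINGBUF_POS_TO_OFF(pos, size)	((pos) & ((size) - 1))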
@@ -2121,7 +2121,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
	    !bpf_capable())
		return -EPERM;

	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (is_perfmon_prog_type(type) && !perfmon_capable())
		return -EPERM;
@@ -2893,13 +2893,11 @@ static int bpf_prog_detach(const union bpf_attr *attr)
	switch (ptype) {
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_SK_SKB:
		return sock_map_get_from_fd(attr, NULL);
		return sock_map_prog_detach(attr, ptype);
	case BPF_PROG_TYPE_LIRC_MODE2:
		return lirc_prog_detach(attr);
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return netns_bpf_prog_detach(attr);
		return netns_bpf_prog_detach(attr, ptype);
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
@@ -399,8 +399,7 @@ static bool reg_type_not_null(enum bpf_reg_type type)
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_BTF_ID;
		type == PTR_TO_SOCK_COMMON;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
@@ -9801,7 +9800,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err;
	int err, num_exentries;

	if (env->subprog_cnt <= 1)
		return 0;
@@ -9876,6 +9875,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
		num_exentries = 0;
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (BPF_CLASS(insn->code) == BPF_LDX &&
			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
				num_exentries++;
		}
		func[i]->aux->num_exentries = num_exentries;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
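The reg_type_not_null() change above is the verifier fix from item 1 of the commit message: PTR_TO_BTF_ID pointers may legitimately be NULL, so NULL checks on them must not be pruned as dead branches. The fentry/fexit selftests added later in this diff (test7/test8) exercise exactly that pattern; the fentry variant looks like this (test7_result and struct bpf_fentry_test_t are defined in that selftest):

	SEC("fentry/bpf_fentry_test7")
	int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
	{
		if (arg == 0)		/* branch the verifier previously eliminated */
			test7_result = 1;
		return 0;
	}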
@@ -530,3 +530,9 @@ size_t dma_direct_max_mapping_size(struct device *dev)
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}
@@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev)
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
@ -147,6 +147,20 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
|
|||
return a + (long)b + c + d + (long)e + f;
|
||||
}
|
||||
|
||||
struct bpf_fentry_test_t {
|
||||
struct bpf_fentry_test_t *a;
|
||||
};
|
||||
|
||||
int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
return (long)arg;
|
||||
}
|
||||
|
||||
int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
return (long)arg->a;
|
||||
}
|
||||
|
||||
int noinline bpf_modify_return_test(int a, int *b)
|
||||
{
|
||||
*b += 1;
|
||||
|
@ -185,6 +199,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
|
|||
const union bpf_attr *kattr,
|
||||
union bpf_attr __user *uattr)
|
||||
{
|
||||
struct bpf_fentry_test_t arg = {};
|
||||
u16 side_effect = 0, ret = 0;
|
||||
int b = 2, err = -EFAULT;
|
||||
u32 retval = 0;
|
||||
|
@ -197,7 +212,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
|
|||
bpf_fentry_test3(4, 5, 6) != 15 ||
|
||||
bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
|
||||
bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
|
||||
bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
|
||||
bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
|
||||
bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
|
||||
bpf_fentry_test8(&arg) != 0)
|
||||
goto out;
|
||||
break;
|
||||
case BPF_MODIFY_RETURN:
|
||||
|
|
|
@ -70,10 +70,10 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
|
|||
EXPORT_SYMBOL(skb_flow_dissector_init);
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
|
||||
int flow_dissector_bpf_prog_attach_check(struct net *net,
|
||||
struct bpf_prog *prog)
|
||||
{
|
||||
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
|
||||
struct bpf_prog *attached;
|
||||
|
||||
if (net == &init_net) {
|
||||
/* BPF flow dissector in the root namespace overrides
|
||||
|
@ -86,26 +86,17 @@ int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
|
|||
for_each_net(ns) {
|
||||
if (ns == &init_net)
|
||||
continue;
|
||||
if (rcu_access_pointer(ns->bpf.progs[type]))
|
||||
if (rcu_access_pointer(ns->bpf.run_array[type]))
|
||||
return -EEXIST;
|
||||
}
|
||||
} else {
|
||||
/* Make sure root flow dissector is not attached
|
||||
* when attaching to the non-root namespace.
|
||||
*/
|
||||
if (rcu_access_pointer(init_net.bpf.progs[type]))
|
||||
if (rcu_access_pointer(init_net.bpf.run_array[type]))
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
attached = rcu_dereference_protected(net->bpf.progs[type],
|
||||
lockdep_is_held(&netns_bpf_mutex));
|
||||
if (attached == prog)
|
||||
/* The same program cannot be attached twice */
|
||||
return -EINVAL;
|
||||
|
||||
rcu_assign_pointer(net->bpf.progs[type], prog);
|
||||
if (attached)
|
||||
bpf_prog_put(attached);
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
@ -903,7 +894,6 @@ bool __skb_flow_dissect(const struct net *net,
|
|||
struct flow_dissector_key_addrs *key_addrs;
|
||||
struct flow_dissector_key_tags *key_tags;
|
||||
struct flow_dissector_key_vlan *key_vlan;
|
||||
struct bpf_prog *attached = NULL;
|
||||
enum flow_dissect_ret fdret;
|
||||
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
|
||||
bool mpls_el = false;
|
||||
|
@ -960,14 +950,14 @@ bool __skb_flow_dissect(const struct net *net,
|
|||
WARN_ON_ONCE(!net);
|
||||
if (net) {
|
||||
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
|
||||
struct bpf_prog_array *run_array;
|
||||
|
||||
rcu_read_lock();
|
||||
attached = rcu_dereference(init_net.bpf.progs[type]);
|
||||
run_array = rcu_dereference(init_net.bpf.run_array[type]);
|
||||
if (!run_array)
|
||||
run_array = rcu_dereference(net->bpf.run_array[type]);
|
||||
|
||||
if (!attached)
|
||||
attached = rcu_dereference(net->bpf.progs[type]);
|
||||
|
||||
if (attached) {
|
||||
if (run_array) {
|
||||
struct bpf_flow_keys flow_keys;
|
||||
struct bpf_flow_dissector ctx = {
|
||||
.flow_keys = &flow_keys,
|
||||
|
@ -975,6 +965,7 @@ bool __skb_flow_dissect(const struct net *net,
|
|||
.data_end = data + hlen,
|
||||
};
|
||||
__be16 n_proto = proto;
|
||||
struct bpf_prog *prog;
|
||||
|
||||
if (skb) {
|
||||
ctx.skb = skb;
|
||||
|
@ -985,7 +976,8 @@ bool __skb_flow_dissect(const struct net *net,
|
|||
n_proto = skb->protocol;
|
||||
}
|
||||
|
||||
ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
|
||||
prog = READ_ONCE(run_array->items[0].prog);
|
||||
ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
|
||||
hlen, flags);
|
||||
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
|
||||
target_container);
|
||||
|
|
|
@ -683,7 +683,7 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
|
|||
return container_of(parser, struct sk_psock, parser);
|
||||
}
|
||||
|
||||
static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
|
||||
static void sk_psock_skb_redirect(struct sk_buff *skb)
|
||||
{
|
||||
struct sk_psock *psock_other;
|
||||
struct sock *sk_other;
|
||||
|
@ -715,12 +715,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
|
|||
}
|
||||
}
|
||||
|
||||
static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
|
||||
struct sk_buff *skb, int verdict)
|
||||
static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
|
||||
{
|
||||
switch (verdict) {
|
||||
case __SK_REDIRECT:
|
||||
sk_psock_skb_redirect(psock, skb);
|
||||
sk_psock_skb_redirect(skb);
|
||||
break;
|
||||
case __SK_PASS:
|
||||
case __SK_DROP:
|
||||
|
@ -741,8 +740,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
|
|||
ret = sk_psock_bpf_run(psock, prog, skb);
|
||||
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
|
||||
}
|
||||
sk_psock_tls_verdict_apply(skb, ret);
|
||||
rcu_read_unlock();
|
||||
sk_psock_tls_verdict_apply(psock, skb, ret);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
|
||||
|
@ -770,7 +769,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
|
|||
}
|
||||
goto out_free;
|
||||
case __SK_REDIRECT:
|
||||
sk_psock_skb_redirect(psock, skb);
|
||||
sk_psock_skb_redirect(skb);
|
||||
break;
|
||||
case __SK_DROP:
|
||||
/* fall-through */
|
||||
|
@ -782,11 +781,18 @@ out_free:
|
|||
|
||||
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
|
||||
{
|
||||
struct sk_psock *psock = sk_psock_from_strp(strp);
|
||||
struct sk_psock *psock;
|
||||
struct bpf_prog *prog;
|
||||
int ret = __SK_DROP;
|
||||
struct sock *sk;
|
||||
|
||||
rcu_read_lock();
|
||||
sk = strp->sk;
|
||||
psock = sk_psock(sk);
|
||||
if (unlikely(!psock)) {
|
||||
kfree_skb(skb);
|
||||
goto out;
|
||||
}
|
||||
prog = READ_ONCE(psock->progs.skb_verdict);
|
||||
if (likely(prog)) {
|
||||
skb_orphan(skb);
|
||||
|
@ -794,8 +800,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
|
|||
ret = sk_psock_bpf_run(psock, prog, skb);
|
||||
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
|
||||
}
|
||||
rcu_read_unlock();
|
||||
sk_psock_verdict_apply(psock, skb, ret);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static int sk_psock_strp_read_done(struct strparser *strp, int err)
|
||||
|
|
|
@ -70,11 +70,49 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
|
|||
struct fd f;
|
||||
int ret;
|
||||
|
||||
if (attr->attach_flags || attr->replace_bpf_fd)
|
||||
return -EINVAL;
|
||||
|
||||
f = fdget(ufd);
|
||||
map = __bpf_map_get(f);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
ret = sock_map_prog_update(map, prog, attr->attach_type);
|
||||
ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
|
||||
fdput(f);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
|
||||
{
|
||||
u32 ufd = attr->target_fd;
|
||||
struct bpf_prog *prog;
|
||||
struct bpf_map *map;
|
||||
struct fd f;
|
||||
int ret;
|
||||
|
||||
if (attr->attach_flags || attr->replace_bpf_fd)
|
||||
return -EINVAL;
|
||||
|
||||
f = fdget(ufd);
|
||||
map = __bpf_map_get(f);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
|
||||
prog = bpf_prog_get(attr->attach_bpf_fd);
|
||||
if (IS_ERR(prog)) {
|
||||
ret = PTR_ERR(prog);
|
||||
goto put_map;
|
||||
}
|
||||
|
||||
if (prog->type != ptype) {
|
||||
ret = -EINVAL;
|
||||
goto put_prog;
|
||||
}
|
||||
|
||||
ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
|
||||
put_prog:
|
||||
bpf_prog_put(prog);
|
||||
put_map:
|
||||
fdput(f);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1203,27 +1241,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
|
|||
}
|
||||
|
||||
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
|
||||
u32 which)
|
||||
struct bpf_prog *old, u32 which)
|
||||
{
|
||||
struct sk_psock_progs *progs = sock_map_progs(map);
|
||||
struct bpf_prog **pprog;
|
||||
|
||||
if (!progs)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (which) {
|
||||
case BPF_SK_MSG_VERDICT:
|
||||
psock_set_prog(&progs->msg_parser, prog);
|
||||
pprog = &progs->msg_parser;
|
||||
break;
|
||||
case BPF_SK_SKB_STREAM_PARSER:
|
||||
psock_set_prog(&progs->skb_parser, prog);
|
||||
pprog = &progs->skb_parser;
|
||||
break;
|
||||
case BPF_SK_SKB_STREAM_VERDICT:
|
||||
psock_set_prog(&progs->skb_verdict, prog);
|
||||
pprog = &progs->skb_verdict;
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (old)
|
||||
return psock_replace_prog(pprog, prog, old);
|
||||
|
||||
psock_set_prog(pprog, prog);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -2,9 +2,6 @@
|
|||
|
||||
#include <net/xsk_buff_pool.h>
|
||||
#include <net/xdp_sock.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/swiotlb.h>
|
||||
|
||||
#include "xsk_queue.h"
|
||||
|
||||
|
@ -55,7 +52,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
|
|||
pool->free_heads_cnt = chunks;
|
||||
pool->headroom = headroom;
|
||||
pool->chunk_size = chunk_size;
|
||||
pool->cheap_dma = true;
|
||||
pool->unaligned = unaligned;
|
||||
pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
|
||||
INIT_LIST_HEAD(&pool->free_list);
|
||||
|
@ -125,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
|
|||
}
|
||||
}
|
||||
|
||||
static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
|
||||
{
|
||||
#if defined(CONFIG_SWIOTLB)
|
||||
phys_addr_t paddr;
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < pool->dma_pages_cnt; i++) {
|
||||
paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
|
||||
if (is_swiotlb_buffer(paddr))
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
|
||||
{
|
||||
#if defined(CONFIG_HAS_DMA)
|
||||
const struct dma_map_ops *ops = get_dma_ops(pool->dev);
|
||||
|
||||
if (ops) {
|
||||
return !ops->sync_single_for_cpu &&
|
||||
!ops->sync_single_for_device;
|
||||
}
|
||||
|
||||
if (!dma_is_direct(ops))
|
||||
return false;
|
||||
|
||||
if (!xp_check_swiotlb_dma(pool))
|
||||
return false;
|
||||
|
||||
if (!dev_is_dma_coherent(pool->dev)) {
|
||||
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
|
||||
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
|
||||
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
|
||||
unsigned long attrs, struct page **pages, u32 nr_pages)
|
||||
{
|
||||
|
@ -180,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
|
|||
|
||||
pool->dev = dev;
|
||||
pool->dma_pages_cnt = nr_pages;
|
||||
pool->dma_need_sync = false;
|
||||
|
||||
for (i = 0; i < pool->dma_pages_cnt; i++) {
|
||||
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
|
||||
|
@ -188,14 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
|
|||
xp_dma_unmap(pool, attrs);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (dma_need_sync(dev, dma))
|
||||
pool->dma_need_sync = true;
|
||||
pool->dma_pages[i] = dma;
|
||||
}
|
||||
|
||||
if (pool->unaligned)
|
||||
xp_check_dma_contiguity(pool);
|
||||
|
||||
pool->dev = dev;
|
||||
pool->cheap_dma = xp_check_cheap_dma(pool);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(xp_dma_map);
|
||||
|
@ -280,7 +234,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
|
|||
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
|
||||
xskb->xdp.data_meta = xskb->xdp.data;
|
||||
|
||||
if (!pool->cheap_dma) {
|
||||
if (pool->dma_need_sync) {
|
||||
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
|
||||
pool->frame_len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
|
|
@ -3171,13 +3171,12 @@ union bpf_attr {
|
|||
* int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
|
||||
* Description
|
||||
* Copy *size* bytes from *data* into a ring buffer *ringbuf*.
|
||||
* If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
|
||||
* new data availability is sent.
|
||||
* IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
|
||||
* new data availability is sent unconditionally.
|
||||
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
|
||||
* of new data availability is sent.
|
||||
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
|
||||
* of new data availability is sent unconditionally.
|
||||
* Return
|
||||
* 0, on success;
|
||||
* < 0, on error.
|
||||
* 0 on success, or a negative error in case of failure.
|
||||
*
|
||||
* void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
|
||||
* Description
|
||||
|
@ -3189,20 +3188,20 @@ union bpf_attr {
|
|||
* void bpf_ringbuf_submit(void *data, u64 flags)
|
||||
* Description
|
||||
* Submit reserved ring buffer sample, pointed to by *data*.
|
||||
* If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
|
||||
* new data availability is sent.
|
||||
* IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
|
||||
* new data availability is sent unconditionally.
|
||||
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
|
||||
* of new data availability is sent.
|
||||
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
|
||||
* of new data availability is sent unconditionally.
|
||||
* Return
|
||||
* Nothing. Always succeeds.
|
||||
*
|
||||
* void bpf_ringbuf_discard(void *data, u64 flags)
|
||||
* Description
|
||||
* Discard reserved ring buffer sample, pointed to by *data*.
|
||||
* If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
|
||||
* new data availability is sent.
|
||||
* IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
|
||||
* new data availability is sent unconditionally.
|
||||
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
|
||||
* of new data availability is sent.
|
||||
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
|
||||
* of new data availability is sent unconditionally.
|
||||
* Return
|
||||
* Nothing. Always succeeds.
|
||||
*
|
||||
|
@ -3210,16 +3209,18 @@ union bpf_attr {
|
|||
* Description
|
||||
* Query various characteristics of provided ring buffer. What
|
||||
* exactly is queries is determined by *flags*:
|
||||
* - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
|
||||
* - BPF_RB_RING_SIZE - the size of ring buffer;
|
||||
* - BPF_RB_CONS_POS - consumer position (can wrap around);
|
||||
* - BPF_RB_PROD_POS - producer(s) position (can wrap around);
|
||||
* Data returned is just a momentary snapshots of actual values
|
||||
*
|
||||
* * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
|
||||
* * **BPF_RB_RING_SIZE**: The size of ring buffer.
|
||||
* * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
|
||||
* * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
|
||||
*
|
||||
* Data returned is just a momentary snapshot of actual values
|
||||
* and could be inaccurate, so this facility should be used to
|
||||
* power heuristics and for reporting, not to make 100% correct
|
||||
* calculation.
|
||||
* Return
|
||||
* Requested value, or 0, if flags are not recognized.
|
||||
* Requested value, or 0, if *flags* are not recognized.
|
||||
*
|
||||
* int bpf_csum_level(struct sk_buff *skb, u64 level)
|
||||
* Description
|
||||
|
|
|
@@ -233,6 +233,8 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
				 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
				 __u64 *probe_offset, __u64 *probe_addr);

enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);

#ifdef __cplusplus
@@ -4818,7 +4818,13 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
			err = -EINVAL;
			goto out;
		}
		prog = bpf_object__find_program_by_title(obj, sec_name);
		prog = NULL;
		for (i = 0; i < obj->nr_programs; i++) {
			if (!strcmp(obj->programs[i].section_name, sec_name)) {
				prog = &obj->programs[i];
				break;
			}
		}
		if (!prog) {
			pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
				sec_name);
@@ -6653,7 +6659,7 @@ static const struct bpf_sec_def section_defs[] = {
		.expected_attach_type = BPF_TRACE_ITER,
		.is_attach_btf = true,
		.attach_fn = attach_iter),
	BPF_EAPROG_SEC("xdp_devmap", BPF_PROG_TYPE_XDP,
	BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
		BPF_XDP_DEVMAP),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
@@ -36,7 +36,7 @@ void test_fentry_fexit(void)
	fentry_res = (__u64 *)fentry_skel->bss;
	fexit_res = (__u64 *)fexit_skel->bss;
	printf("%lld\n", fentry_skel->bss->test1_result);
	for (i = 0; i < 6; i++) {
	for (i = 0; i < 8; i++) {
		CHECK(fentry_res[i] != 1, "result",
		      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
		CHECK(fexit_res[i] != 1, "result",
@@ -527,8 +527,8 @@ static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}

static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
@ -1,9 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Test that the flow_dissector program can be updated with a single
|
||||
* syscall by attaching a new program that replaces the existing one.
|
||||
*
|
||||
* Corner case - the same program cannot be attached twice.
|
||||
* Tests for attaching, detaching, and replacing flow_dissector BPF program.
|
||||
*/
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
@ -116,7 +113,7 @@ static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
|
|||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
|
||||
|
||||
out_detach:
|
||||
err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(err))
|
||||
perror("bpf_prog_detach");
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
|
@ -152,7 +149,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
|
|||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
|
||||
int err, link;
|
||||
|
||||
err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
|
||||
err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_prog_attach(prog1)");
|
||||
return;
|
||||
|
@ -168,7 +165,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
|
|||
close(link);
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(err))
|
||||
perror("bpf_prog_detach");
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
|
@ -188,7 +185,7 @@ static void test_link_create_prog_attach(int netns, int prog1, int prog2)
|
|||
|
||||
/* Expect failure attaching prog when link exists */
|
||||
errno = 0;
|
||||
err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0);
|
||||
err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
|
||||
if (CHECK_FAIL(!err || errno != EEXIST))
|
||||
perror("bpf_prog_attach(prog2) expected EEXIST");
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
@ -211,7 +208,7 @@ static void test_link_create_prog_detach(int netns, int prog1, int prog2)
|
|||
|
||||
/* Expect failure detaching prog when link exists */
|
||||
errno = 0;
|
||||
err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(!err || errno != EINVAL))
|
||||
perror("bpf_prog_detach expected EINVAL");
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
@ -231,7 +228,7 @@ static void test_prog_attach_detach_query(int netns, int prog1, int prog2)
|
|||
}
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
|
||||
err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_prog_detach");
|
||||
return;
|
||||
|
@ -308,6 +305,31 @@ static void test_link_update_replace_old_prog(int netns, int prog1, int prog2)
|
|||
CHECK_FAIL(prog_is_attached(netns));
|
||||
}
|
||||
|
||||
static void test_link_update_same_prog(int netns, int prog1, int prog2)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
|
||||
int err, link;
|
||||
|
||||
link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
|
||||
if (CHECK_FAIL(link < 0)) {
|
||||
perror("bpf_link_create(prog1)");
|
||||
return;
|
||||
}
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
/* Expect success updating the prog with the same one */
|
||||
update_opts.flags = 0;
|
||||
update_opts.old_prog_fd = 0;
|
||||
err = bpf_link_update(link, prog1, &update_opts);
|
||||
if (CHECK_FAIL(err))
|
||||
perror("bpf_link_update");
|
||||
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
|
||||
|
||||
close(link);
|
||||
CHECK_FAIL(prog_is_attached(netns));
|
||||
}
|
||||
|
||||
static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
|
||||
|
@ -571,6 +593,8 @@ static void run_tests(int netns)
|
|||
test_link_update_no_old_prog },
|
||||
{ "link update with replace old prog",
|
||||
test_link_update_replace_old_prog },
|
||||
{ "link update with same prog",
|
||||
test_link_update_same_prog },
|
||||
{ "link update invalid opts",
|
||||
test_link_update_invalid_opts },
|
||||
{ "link update invalid prog",
|
||||
|
|
|
@@ -25,7 +25,7 @@ struct bpf_iter__netlink {
	struct netlink_sock *sk;
} __attribute__((preserve_access_index));

static inline struct inode *SOCK_INODE(struct socket *socket)
static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
@ -55,3 +55,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f)
|
|||
e == (void *)20 && f == 21;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct bpf_fentry_test_t {
|
||||
struct bpf_fentry_test_t *a;
|
||||
};
|
||||
|
||||
__u64 test7_result = 0;
|
||||
SEC("fentry/bpf_fentry_test7")
|
||||
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg == 0)
|
||||
test7_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 test8_result = 0;
|
||||
SEC("fentry/bpf_fentry_test8")
|
||||
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg->a == 0)
|
||||
test8_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -56,3 +56,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
|
|||
e == (void *)20 && f == 21 && ret == 111;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct bpf_fentry_test_t {
|
||||
struct bpf_fentry_test *a;
|
||||
};
|
||||
|
||||
__u64 test7_result = 0;
|
||||
SEC("fexit/bpf_fentry_test7")
|
||||
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg == 0)
|
||||
test7_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 test8_result = 0;
|
||||
SEC("fexit/bpf_fentry_test8")
|
||||
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
|
||||
{
|
||||
if (arg->a == 0)
|
||||
test8_result = 1;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -79,7 +79,7 @@ struct {
|
|||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 2);
|
||||
__uint(max_entries, 3);
|
||||
__type(key, int);
|
||||
__type(value, int);
|
||||
} sock_skb_opts SEC(".maps");
|
||||
|
@ -94,6 +94,12 @@ struct {
|
|||
SEC("sk_skb1")
|
||||
int bpf_prog1(struct __sk_buff *skb)
|
||||
{
|
||||
int *f, two = 2;
|
||||
|
||||
f = bpf_map_lookup_elem(&sock_skb_opts, &two);
|
||||
if (f && *f) {
|
||||
return *f;
|
||||
}
|
||||
return skb->len;
|
||||
}
|
||||
|
||||
|
|
|
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
/* valid program on DEVMAP entry via SEC name;
 * has access to egress and ingress ifindex
 */
SEC("xdp_devmap")
SEC("xdp_devmap/map_prog")
int xdp_dummy_dm(struct xdp_md *ctx)
{
	char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
@ -85,6 +85,7 @@ int txmsg_ktls_skb_drop;
|
|||
int txmsg_ktls_skb_redir;
|
||||
int ktls;
|
||||
int peek_flag;
|
||||
int skb_use_parser;
|
||||
|
||||
static const struct option long_options[] = {
|
||||
{"help", no_argument, NULL, 'h' },
|
||||
|
@ -174,6 +175,7 @@ static void test_reset(void)
|
|||
txmsg_apply = txmsg_cork = 0;
|
||||
txmsg_ingress = txmsg_redir_skb = 0;
|
||||
txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
|
||||
skb_use_parser = 0;
|
||||
}
|
||||
|
||||
static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
|
||||
|
@ -1211,6 +1213,11 @@ run:
|
|||
}
|
||||
}
|
||||
|
||||
if (skb_use_parser) {
|
||||
i = 2;
|
||||
err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
|
||||
}
|
||||
|
||||
if (txmsg_drop)
|
||||
options->drop_expected = true;
|
||||
|
||||
|
@ -1650,6 +1657,16 @@ static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
|
|||
test_send(opt, cgrp);
|
||||
}
|
||||
|
||||
static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
|
||||
{
|
||||
txmsg_pass = 1;
|
||||
skb_use_parser = 512;
|
||||
opt->iov_length = 256;
|
||||
opt->iov_count = 1;
|
||||
opt->rate = 2;
|
||||
test_exec(cgrp, opt);
|
||||
}
|
||||
|
||||
char *map_names[] = {
|
||||
"sock_map",
|
||||
"sock_map_txmsg",
|
||||
|
@ -1748,6 +1765,7 @@ struct _test test[] = {
|
|||
{"txmsg test pull-data", test_txmsg_pull},
|
||||
{"txmsg test pop-data", test_txmsg_pop},
|
||||
{"txmsg test push/pop data", test_txmsg_push_pop},
|
||||
{"txmsg text ingress parser", test_txmsg_ingress_parser},
|
||||
};
|
||||
|
||||
static int check_whitelist(struct _test *t, struct sockmap_options *opt)
|
||||
|