/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */

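/* Illustrative sketch (not part of the upstream header): roughly how a
 * driver pairs the register/unregister calls around an RX-ring's
 * lifetime. The mydrv_* names and struct mydrv_rx_ring are hypothetical
 * driver-private placeholders.
 *
 *	static int mydrv_setup_rx_ring(struct mydrv_rx_ring *ring)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *				       ring->queue_index);
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 *
 *	static void mydrv_free_rx_ring(struct mydrv_rx_ring *ring)
 *	{
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *	}
 */
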
enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_MAX,
};

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
};

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u16 metasize;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
};

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;
	int metasize;
	int headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return NULL;

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}

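/* Illustrative sketch (not part of the upstream header): converting the
 * NAPI-scoped xdp_buff into an xdp_frame before handing it to another
 * context, e.g. when queuing the frame for transmit on a (possibly
 * remote) CPU. A NULL return means there was not enough headroom for
 * struct xdp_frame and the packet should be dropped. Here "xdp" is the
 * driver's xdp_buff pointer and mydrv_enqueue_frame() is a hypothetical
 * driver helper.
 *
 *	struct xdp_frame *xdpf;
 *
 *	xdpf = convert_to_xdp_frame(xdp);
 *	if (unlikely(!xdpf))
 *		goto drop;
 *	mydrv_enqueue_frame(xdpf);
 */
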
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);

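/* Illustrative sketch (not part of the upstream header): returning a
 * frame to its originating allocator once DMA-TX completion runs,
 * typically on a remote CPU. The frame's embedded xdp_mem_info tells
 * the core which memory model to recycle into. tx_buf is a hypothetical
 * driver-private TX descriptor holding the xdp_frame pointer.
 *
 *	xdp_return_frame(tx_buf->xdpf);
 *
 * When the return happens from within NAPI/softirq context,
 * xdp_return_frame_rx_napi() can be used instead; xdp_return_buff()
 * covers an xdp_buff that was never converted to an xdp_frame.
 */
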
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);

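/* Illustrative sketch (not part of the upstream header): after
 * registering the xdp_rxq_info, a driver selects the memory model its
 * RX-ring frames come from, e.g. continuing the hypothetical
 * mydrv_setup_rx_ring() above. ring->page_pool is a hypothetical
 * driver-private page_pool instance.
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL,
 *					 ring->page_pool);
 *	if (err)
 *		goto err_unreg;
 *
 * For MEM_TYPE_PAGE_SHARED and MEM_TYPE_PAGE_ORDER0 the allocator
 * argument is NULL, as no registered allocator is needed.
 */
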
/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

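/* Illustrative sketch (not part of the upstream header): a driver that
 * cannot preserve an XDP metadata area marks it invalid while building
 * the xdp_buff, so that attempts to grow the meta area are rejected.
 * hard_start, headroom and len are hypothetical driver-side values.
 *
 *	struct xdp_buff xdp;
 *
 *	xdp.data_hard_start = hard_start;
 *	xdp.data = hard_start + headroom;
 *	xdp.data_end = xdp.data + len;
 *	xdp_set_data_meta_invalid(&xdp);
 *
 * Drivers that do support metadata initialise xdp.data_meta = xdp.data
 * instead.
 */
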
#endif /* __LINUX_NET_XDP_H__ */