Merge branch 'net-avoid-the-memory-waste-in-some-ethernet-drivers'
Kevin Hao says:

====================
net: Avoid the memory waste in some Ethernet drivers

In the current implementation of napi_alloc_frag(), there is no alignment
guarantee for the returned buffer address. We have had to use ugly
workarounds to get an aligned buffer address in some Ethernet drivers.
This patch series introduces helper functions that return an aligned
buffer, so we can drop those workarounds and avoid the unnecessary
memory waste.
====================

Link: https://lore.kernel.org/r/20210204105638.1584-1-haokexin@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 9c2865e3fa
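The series replaces the open-coded pattern below. A minimal before/after sketch (illustrative only; bufsz and SOME_ALIGN are placeholder names, not taken from any real driver):

	/* Before: over-allocate, then align by hand; the extra
	 * SOME_ALIGN bytes are pure padding on every allocation.
	 */
	buf = napi_alloc_frag(bufsz + SOME_ALIGN);
	if (unlikely(!buf))
		return -ENOMEM;
	buf = PTR_ALIGN(buf, SOME_ALIGN);

	/* After: the allocator returns an aligned address directly.
	 * SOME_ALIGN must be a power of two.
	 */
	buf = napi_alloc_frag_align(bufsz, SOME_ALIGN);
	if (unlikely(!buf))
		return -ENOMEM;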
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -764,12 +764,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
 	/* Prepare the HW SGT structure */
 	sgt_buf_size = priv->tx_data_offset +
 		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
-	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+	sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
 	if (unlikely(!sgt_buf)) {
 		err = -ENOMEM;
 		goto sgt_buf_alloc_failed;
 	}
-	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
 	memset(sgt_buf, 0, sgt_buf_size);
 
 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
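With the helper, the driver no longer pads each scatter-gather table buffer. Assuming DPAA2_ETH_TX_BUF_ALIGN is 64, as in this driver's headers at the time:

	/* old request per SGT buffer: sgt_buf_size + 64 bytes
	 * new request per SGT buffer: sgt_buf_size bytes, 64-byte aligned
	 * the PTR_ALIGN() fixup and its 64 bytes of padding both go away
	 */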
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -488,11 +488,10 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
 	dma_addr_t iova;
 	u8 *buf;
 
-	buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
+	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
 	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = PTR_ALIGN(buf, OTX2_ALIGN);
 	iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
 				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 	if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
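Same pattern for octeontx2, where the padding was a full cache line per receive buffer. Assuming OTX2_ALIGN is 128, as in this driver's headers at the time:

	/* old request per RX buffer: pool->rbsize + 128 bytes
	 * new request per RX buffer: pool->rbsize bytes, 128-byte aligned
	 * e.g. a pool of 1024 RX buffers stops burning 128 KiB of frag space
	 */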
include/linux/gfp.h
@@ -583,8 +583,16 @@ extern void free_pages(unsigned long addr, unsigned int order);
 
 struct page_frag_cache;
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
-			     unsigned int fragsz, gfp_t gfp_mask);
+extern void *page_frag_alloc_align(struct page_frag_cache *nc,
+				   unsigned int fragsz, gfp_t gfp_mask,
+				   unsigned int align_mask);
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+			     unsigned int fragsz, gfp_t gfp_mask)
+{
+	return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
 extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
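page_frag_alloc() survives as a zero-cost inline alias: masking with ~0u clears no bits, so existing callers see identical behavior, while a power-of-two align becomes a mask via unsigned negation. A standalone sketch of the two identities (plain C, illustrative, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int offset = 1234;
		unsigned int align = 64;

		/* ~0u keeps every bit: the no-alignment case is a no-op */
		assert((offset & ~0u) == offset);

		/* for power-of-two align, -align == ~(align - 1), a mask
		 * that rounds an offset down to a multiple of align */
		assert(-align == ~(align - 1));
		printf("%u -> %u\n", offset, offset & -align);	/* 1234 -> 1216 */
		return 0;
	}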
include/linux/skbuff.h
@@ -2818,7 +2818,26 @@ void skb_queue_purge(struct sk_buff_head *list);
 
 unsigned int skb_rbtree_purge(struct rb_root *root);
 
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+	return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+					    unsigned int align)
+{
+	WARN_ON_ONCE(!is_power_of_2(align));
+	return __netdev_alloc_frag_align(fragsz, -align);
+}
 
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
 				   gfp_t gfp_mask);
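The WARN_ON_ONCE(!is_power_of_2(align)) is there because -align only forms a valid rounding mask when align is a power of two. A quick demonstration of the failure mode (plain C, illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int offset = 100;

		printf("%u\n", offset & -64u);	/* 64: a multiple of 64, OK */
		printf("%u\n", offset & -48u);	/* 64: NOT a multiple of 48 */
		return 0;
	}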
@@ -2877,7 +2896,20 @@ static inline void skb_free_frag(void *addr)
 	page_frag_free(addr);
 }
 
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+	return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+					  unsigned int align)
+{
+	WARN_ON_ONCE(!is_power_of_2(align));
+	return __napi_alloc_frag_align(fragsz, -align);
+}
+
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
 				 unsigned int length, gfp_t gfp_mask);
 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
mm/page_alloc.c
@@ -5137,8 +5137,9 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc(struct page_frag_cache *nc,
-		      unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc_align(struct page_frag_cache *nc,
+			    unsigned int fragsz, gfp_t gfp_mask,
+			    unsigned int align_mask)
 {
 	unsigned int size = PAGE_SIZE;
 	struct page *page;
@@ -5190,11 +5191,12 @@ refill:
 	}
 
 	nc->pagecnt_bias--;
+	offset &= align_mask;
 	nc->offset = offset;
 
 	return nc->va + offset;
 }
-EXPORT_SYMBOL(page_frag_alloc);
+EXPORT_SYMBOL(page_frag_alloc_align);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
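Aligning costs a single AND because the frag cache carves fragments from the top of the page downward: rounding the freshly computed offset down both aligns the fragment start and can only move it further into free space, consuming at most align - 1 extra bytes, and only when a caller asks for alignment. A minimal model of the hot path (illustrative, not the kernel code; assumes a 4096-byte page):

	#include <stdio.h>

	int main(void)
	{
		unsigned int offset = 4096;	/* nc->offset: free space ends here */
		unsigned int fragsz = 120;
		unsigned int align_mask = -64u;	/* caller wants 64-byte alignment */

		offset -= fragsz;		/* 3976: unaligned fragment start */
		offset &= align_mask;		/* 3968: rounded down, 64-byte aligned */
		printf("frag at [%u, %u)\n", offset, offset + fragsz);
		return 0;
	}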
net/core/skbuff.c
@@ -374,29 +374,23 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
+				unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
 }
 
-void *napi_alloc_frag(unsigned int fragsz)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
 }
-EXPORT_SYMBOL(napi_alloc_frag);
+EXPORT_SYMBOL(__napi_alloc_frag_align);
 
-/**
- * netdev_alloc_frag - allocate a page fragment
- * @fragsz: fragment size
- *
- * Allocates a frag from a page for receive buffer.
- * Uses GFP_ATOMIC allocations.
- */
-void *netdev_alloc_frag(unsigned int fragsz)
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct page_frag_cache *nc;
 	void *data;
@@ -404,15 +398,15 @@ void *netdev_alloc_frag(unsigned int fragsz)
 	fragsz = SKB_DATA_ALIGN(fragsz);
 	if (in_irq() || irqs_disabled()) {
 		nc = this_cpu_ptr(&netdev_alloc_cache);
-		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
+		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
 	} else {
 		local_bh_disable();
-		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
+		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
 		local_bh_enable();
 	}
 	return data;
 }
-EXPORT_SYMBOL(netdev_alloc_frag);
+EXPORT_SYMBOL(__netdev_alloc_frag_align);
 
 /**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
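__netdev_alloc_frag_align() keeps the existing context split, with align_mask simply riding along both branches: in hardirq context or with IRQs disabled it uses the dedicated per-CPU netdev_alloc_cache, otherwise it takes the NAPI cache under local_bh_disable(). Caller-side the rules are unchanged; sketched below with hypothetical driver values (rx_buf_sz, RX_BUF_ALIGN):

	/* safe from any context, e.g. a watchdog or hardirq path */
	void *buf = netdev_alloc_frag_align(rx_buf_sz, RX_BUF_ALIGN);

	/* softirq only, e.g. inside a NAPI poll() callback */
	void *nbuf = napi_alloc_frag_align(rx_buf_sz, RX_BUF_ALIGN);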