net: core: another layer of lists, around PF_MEMALLOC skb handling

First example of a layer splitting the list (rather than merely taking
individual packets off it).  Involves a new list.h function,
list_cut_before(), which is like list_cut_position() but cuts on the
other side of the given entry.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4ce0017a37
parent 7da517a3bc
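For context on the helper named in the message, a minimal sketch of how list_cut_before() differs from the existing list_cut_position(), assuming a kernel build where <linux/list.h> is available. The struct and function names below are illustrative only and are not part of the patch.

#include <linux/list.h>

struct item {
	int val;
	struct list_head list;
};

/* Illustrative only: split a list of 'struct item' around @pivot with
 * each helper in turn.
 */
static void cut_demo(struct list_head *head, struct item *pivot)
{
	LIST_HEAD(front);
	LIST_HEAD(before);

	/* list_cut_position(): @front receives everything up to and
	 * INCLUDING @pivot; @head keeps what follows it.
	 */
	list_cut_position(&front, head, &pivot->list);

	/* Put the entries back so the same data can show the new helper. */
	list_splice_init(&front, head);

	/* list_cut_before(): @before receives everything strictly BEFORE
	 * @pivot; @head keeps @pivot and everything after it.
	 */
	list_cut_before(&before, head, &pivot->list);
}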
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
 		__list_cut_position(list, head, entry);
 }
 
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+				   struct list_head *head,
+				   struct list_head *entry)
+{
+	if (head->next == entry) {
+		INIT_LIST_HEAD(list);
+		return;
+	}
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry->prev;
+	list->prev->next = list;
+	head->next = entry;
+	entry->prev = head;
+}
+
 static inline void __list_splice(const struct list_head *list,
 				 struct list_head *prev,
 				 struct list_head *next)
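As a supplement to the kerneldoc above, a hedged illustration of the two boundary cases it describes; the function name is invented for this note and is not in the tree.

#include <linux/list.h>

/* Boundary behaviour of list_cut_before():
 *  - @entry == @head: every entry moves to @list, leaving @head empty.
 *  - @entry == @head->next: nothing precedes @entry, so @list is simply
 *    reinitialised (comes back empty) and @head is left untouched.
 */
static void cut_before_edge_cases(struct list_head *head)
{
	LIST_HEAD(all);
	LIST_HEAD(none);

	list_cut_before(&all, head, head);        /* whole list moves to @all */
	list_splice_init(&all, head);             /* restore @head            */
	list_cut_before(&none, head, head->next); /* @none stays empty        */
}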
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4784,6 +4784,14 @@ int netif_receive_skb_core(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_receive_skb_core);
 
+static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+{
+	struct sk_buff *skb, *next;
+
+	list_for_each_entry_safe(skb, next, head, list)
+		__netif_receive_skb_core(skb, pfmemalloc);
+}
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	int ret;
@@ -4809,6 +4817,34 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	return ret;
 }
 
+static void __netif_receive_skb_list(struct list_head *head)
+{
+	unsigned long noreclaim_flag = 0;
+	struct sk_buff *skb, *next;
+	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
+
+	list_for_each_entry_safe(skb, next, head, list) {
+		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
+			struct list_head sublist;
+
+			/* Handle the previous sublist */
+			list_cut_before(&sublist, head, &skb->list);
+			__netif_receive_skb_list_core(&sublist, pfmemalloc);
+			pfmemalloc = !pfmemalloc;
+			/* See comments in __netif_receive_skb */
+			if (pfmemalloc)
+				noreclaim_flag = memalloc_noreclaim_save();
+			else
+				memalloc_noreclaim_restore(noreclaim_flag);
+		}
+	}
+	/* Handle the remaining sublist */
+	__netif_receive_skb_list_core(head, pfmemalloc);
+	/* Restore pflags */
+	if (pfmemalloc)
+		memalloc_noreclaim_restore(noreclaim_flag);
+}
+
 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 {
 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
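The hunk above is the list-splitting layer the commit message refers to: the receive list is walked with list_for_each_entry_safe(), and whenever the pfmemalloc state flips, everything before the current skb is carved off with list_cut_before() and handed to __netif_receive_skb_list_core() as one sublist. Below is a hedged, generalised sketch of that pattern, with an arbitrary predicate standing in for the pfmemalloc test; the names are invented for this note.

#include <linux/list.h>
#include <linux/skbuff.h>

/* Illustrative only: process @head in maximal runs of skbs that agree on
 * @pred, mirroring how __netif_receive_skb_list() batches on pfmemalloc.
 */
static void process_in_runs(struct list_head *head,
			    bool (*pred)(const struct sk_buff *skb),
			    void (*run_fn)(struct list_head *run, bool flag))
{
	struct sk_buff *skb, *next;
	bool cur = false;

	list_for_each_entry_safe(skb, next, head, list) {
		if (pred(skb) != cur) {
			struct list_head sublist;

			/* Everything before @skb belongs to the previous run. */
			list_cut_before(&sublist, head, &skb->list);
			run_fn(&sublist, cur);
			cur = !cur;
		}
	}
	/* Whatever remains on @head is the final run. */
	run_fn(head, cur);
}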
@@ -4843,14 +4879,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 	return ret;
 }
 
-static void __netif_receive_skb_list(struct list_head *head)
-{
-	struct sk_buff *skb, *next;
-
-	list_for_each_entry_safe(skb, next, head, list)
-		__netif_receive_skb(skb);
-}
-
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	int ret;