Merge branch 'netdev-RT'
Sebastian Andrzej Siewior says:

====================
net: dev: PREEMPT_RT fixups.

This series removes or replaces preempt_disable() and local_irq_save()
sections which are problematic on PREEMPT_RT. Patch 2 makes netif_rx()
work from any context after I found suggestions for it in an old thread.
Should that work, then the context-specific variants could be removed.

v2…v3:
  - #2
    - Export __netif_rx() so it can be used by everyone.
    - Add a lockdep assert to check for interrupt context.
    - Update the kernel doc and mention that the skb is posted to
      backlog NAPI.
    - Use __netif_rx() also in drivers/net/*.c.
    - Added Toke's review tag and kept Eric's despite the changes made.

v1…v2:
  - #1 and #2
    - Merge patch 1 and 2 from the series (as per Toke).
    - Updated the patch description and corrected the first commit
      number (as per Eric).
  - #2
    - Provide netif_rx() as in v1 and additionally __netif_rx() without
      local_bh disable()+enable() for the loopback driver. __netif_rx()
      is not exported (loopback is built-in only) so it won't be used by
      drivers. If this doesn't work then we can still export/define a
      wrapper as Eric suggested.
    - Added a comment that netif_rx() is considered legacy.
  - #3
    - Moved ____napi_schedule() into rps_ipi_queued() and renamed it
      napi_schedule_rps().

  https://lore.kernel.org/all/20220204201259.1095226-1-bigeasy@linutronix.de/

v1: https://lore.kernel.org/all/20220202122848.647635-1-bigeasy@linutronix.de
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit da54d75beb
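For orientation, a minimal sketch of how the two entry points divide the work after this series. The driver functions below are hypothetical; only netif_rx() and __netif_rx() are the API introduced or changed here:

/* Hardirq or softirq context (ISR, NAPI poll, timer callback): bottom
 * halves are already disabled, so the cheaper __netif_rx() may be used;
 * lockdep asserts the context.
 */
static void example_irq_rx(struct sk_buff *skb)
{
	__netif_rx(skb);
}

/* Any other context (workqueue, process context): netif_rx() now wraps
 * the enqueue in local_bh_disable()/local_bh_enable(), replacing the
 * old netif_rx_ni() and netif_rx_any_context() variants.
 */
static void example_task_rx(struct sk_buff *skb)
{
	netif_rx(skb);
}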
@@ -2373,7 +2373,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
 	skb->pkt_type = PACKET_MULTICAST;
 	skb->ip_summed = CHECKSUM_NONE;
 	len = skb->len;
-	if (netif_rx(skb) == NET_RX_SUCCESS) {
+	if (__netif_rx(skb) == NET_RX_SUCCESS) {
 		amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
 		dev_sw_netstats_rx_add(amt->dev, len);
 	} else {
@@ -2470,7 +2470,7 @@ report:
 	skb->pkt_type = PACKET_MULTICAST;
 	skb->ip_summed = CHECKSUM_NONE;
 	len = skb->len;
-	if (netif_rx(skb) == NET_RX_SUCCESS) {
+	if (__netif_rx(skb) == NET_RX_SUCCESS) {
 		amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
 					true);
 		dev_sw_netstats_rx_add(amt->dev, len);
@@ -925,7 +925,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		}
 
 		skb->protocol = eth_type_trans(skb, geneve->dev);
-		netif_rx(skb);
+		__netif_rx(skb);
 		dst_release(&rt->dst);
 		return -EMSGSIZE;
 	}
@@ -1021,7 +1021,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		}
 
 		skb->protocol = eth_type_trans(skb, geneve->dev);
-		netif_rx(skb);
+		__netif_rx(skb);
 		dst_release(dst);
 		return -EMSGSIZE;
 	}
@@ -207,7 +207,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
 
 	dev_sw_netstats_rx_add(pctx->dev, skb->len);
 
-	netif_rx(skb);
+	__netif_rx(skb);
 	return 0;
 
 err:
@@ -78,7 +78,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
 	skb_orphan(skb);
 
-	/* Before queueing this packet to netif_rx(),
+	/* Before queueing this packet to __netif_rx(),
 	 * make sure dst is refcounted.
 	 */
 	skb_dst_force(skb);
@@ -86,7 +86,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 	skb->protocol = eth_type_trans(skb, dev);
 
 	len = skb->len;
-	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+	if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
 		dev_lstats_add(dev, len);
 
 	return NETDEV_TX_OK;
@@ -1033,7 +1033,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 			else
 				nskb->pkt_type = PACKET_MULTICAST;
 
-			netif_rx(nskb);
+			__netif_rx(nskb);
 		}
 		continue;
 	}
@@ -1056,7 +1056,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 
 		nskb->dev = ndev;
 
-		if (netif_rx(nskb) == NET_RX_SUCCESS) {
+		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
 			u64_stats_update_begin(&secy_stats->syncp);
 			secy_stats->stats.InPktsUntagged++;
 			u64_stats_update_end(&secy_stats->syncp);
@@ -1288,7 +1288,7 @@ nosci:
 
 		macsec_reset_skb(nskb, macsec->secy.netdev);
 
-		ret = netif_rx(nskb);
+		ret = __netif_rx(nskb);
 		if (ret == NET_RX_SUCCESS) {
 			u64_stats_update_begin(&secy_stats->syncp);
 			secy_stats->stats.InPktsUnknownSCI++;
@@ -410,7 +410,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
 	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
 		nskb->pkt_type = PACKET_HOST;
 
-	ret = netif_rx(nskb);
+	ret = __netif_rx(nskb);
 	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
 }
 
@@ -468,7 +468,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 		/* forward to original port. */
 		vlan = src;
 		ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
-		      netif_rx(skb);
+		      __netif_rx(skb);
 		handle_res = RX_HANDLER_CONSUMED;
 		goto out;
 	}
@@ -225,7 +225,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 		u64_stats_inc(&mhi_netdev->stats.rx_packets);
 		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
 		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
-		netif_rx(skb);
+		__netif_rx(skb);
 	}
 
 	/* Refill if RX buffers queue becomes low */
@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 	skb->protocol = eth_type_trans(skb, ndev);
 	skb->ip_summed = CHECKSUM_NONE;
 
-	if (netif_rx(skb) == NET_RX_DROP) {
+	if (__netif_rx(skb) == NET_RX_DROP) {
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	} else {
@@ -109,7 +109,7 @@ static int rionet_rx_clean(struct net_device *ndev)
 		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
 		rnet->rx_skb[i]->protocol =
 			eth_type_trans(rnet->rx_skb[i], ndev);
-		error = netif_rx(rnet->rx_skb[i]);
+		error = __netif_rx(rnet->rx_skb[i]);
 
 		if (error == NET_RX_DROP) {
 			ndev->stats.rx_dropped++;
@@ -872,7 +872,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3
 
 	/* datagram completed: send to upper level */
 	skb_trim(skb, dlen);
-	netif_rx(skb);
+	__netif_rx(skb);
 	stats->rx_bytes+=dlen;
 	stats->rx_packets++;
 	lp->rx_skb[ns] = NULL;
@@ -287,7 +287,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
 {
 	return __dev_forward_skb(dev, skb) ?: xdp ?
 		veth_xdp_rx(rq, skb) :
-		netif_rx(skb);
+		__netif_rx(skb);
 }
 
 /* return true if the specified skb has chances of GRO aggregation
@@ -418,7 +418,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
 
 	skb->protocol = eth_type_trans(skb, dev);
 
-	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+	if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
 		vrf_rx_stats(dev, len);
 	else
 		this_cpu_inc(dev->dstats->rx_drps);
@@ -2541,7 +2541,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 	tx_stats->tx_bytes += len;
 	u64_stats_update_end(&tx_stats->syncp);
 
-	if (netif_rx(skb) == NET_RX_SUCCESS) {
+	if (__netif_rx(skb) == NET_RX_SUCCESS) {
 		u64_stats_update_begin(&rx_stats->syncp);
 		rx_stats->rx_packets++;
 		rx_stats->rx_bytes += len;
@@ -3672,8 +3672,18 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
 int netif_rx(struct sk_buff *skb);
-int netif_rx_ni(struct sk_buff *skb);
-int netif_rx_any_context(struct sk_buff *skb);
+int __netif_rx(struct sk_buff *skb);
+
+static inline int netif_rx_ni(struct sk_buff *skb)
+{
+	return netif_rx(skb);
+}
+
+static inline int netif_rx_any_context(struct sk_buff *skb)
+{
+	return netif_rx(skb);
+}
+
 int netif_receive_skb(struct sk_buff *skb);
 int netif_receive_skb_core(struct sk_buff *skb);
 void netif_receive_skb_list_internal(struct list_head *head);
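Worth noting in the header change above: the old entry points survive as static inline wrappers, so existing callers compile unchanged while the context-specific behaviour disappears. A hypothetical out-of-tree call site, purely for illustration:

/* Compiles exactly as before, but now expands to plain netif_rx(),
 * which is safe to call from any context.
 */
static int example_deliver(struct sk_buff *skb)
{
	return netif_rx_ni(skb);
}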
@@ -260,13 +260,6 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
 	TP_ARGS(skb)
 );
 
-DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
-
-	TP_PROTO(const struct sk_buff *skb),
-
-	TP_ARGS(skb)
-);
-
 DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
 
 	TP_PROTO(int ret),
@@ -312,13 +305,6 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
 	TP_ARGS(ret)
 );
 
-DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
-
-	TP_PROTO(int ret),
-
-	TP_ARGS(ret)
-);
-
 DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
 
 	TP_PROTO(int ret),
net/core/dev.c
@@ -216,18 +216,38 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
-static inline void rps_lock(struct softnet_data *sd)
+static inline void rps_lock_irqsave(struct softnet_data *sd,
+				    unsigned long *flags)
 {
-#ifdef CONFIG_RPS
-	spin_lock(&sd->input_pkt_queue.lock);
-#endif
+	if (IS_ENABLED(CONFIG_RPS))
+		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_save(*flags);
 }
 
-static inline void rps_unlock(struct softnet_data *sd)
+static inline void rps_lock_irq_disable(struct softnet_data *sd)
 {
-#ifdef CONFIG_RPS
-	spin_unlock(&sd->input_pkt_queue.lock);
-#endif
+	if (IS_ENABLED(CONFIG_RPS))
+		spin_lock_irq(&sd->input_pkt_queue.lock);
+	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_disable();
 }
 
+static inline void rps_unlock_irq_restore(struct softnet_data *sd,
+					  unsigned long *flags)
+{
+	if (IS_ENABLED(CONFIG_RPS))
+		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(*flags);
+}
+
+static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+{
+	if (IS_ENABLED(CONFIG_RPS))
+		spin_unlock_irq(&sd->input_pkt_queue.lock);
+	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_enable();
+}
+
 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
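These helpers fold three compile-time cases into one call site: with CONFIG_RPS the backlog queue's spinlock is taken (saving or disabling IRQs), without RPS on non-RT kernels only the IRQ state is changed, and on PREEMPT_RT without RPS they deliberately do nothing, since the series relies on BH-disabled sections for exclusion there. A sketch of the resulting caller pattern, mirroring enqueue_to_backlog() below (the function name is illustrative):

static void example_backlog_update(struct softnet_data *sd)
{
	unsigned long flags;

	rps_lock_irqsave(sd, &flags);
	/* ... manipulate sd->input_pkt_queue ... */
	rps_unlock_irq_restore(sd, &flags);
}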
@@ -4456,11 +4476,11 @@ static void rps_trigger_softirq(void *data)
  * If yes, queue it to our IPI list and return 1
  * If no, return 0
  */
-static int rps_ipi_queued(struct softnet_data *sd)
+static int napi_schedule_rps(struct softnet_data *sd)
 {
-#ifdef CONFIG_RPS
 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
+#ifdef CONFIG_RPS
 	if (sd != mysd) {
 		sd->rps_ipi_next = mysd->rps_ipi_list;
 		mysd->rps_ipi_list = sd;
@@ -4469,6 +4489,7 @@ static int rps_ipi_queued(struct softnet_data *sd)
 		return 1;
 	}
 #endif /* CONFIG_RPS */
+	__napi_schedule_irqoff(&mysd->backlog);
 	return 0;
 }
 
@@ -4525,9 +4546,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 
 	sd = &per_cpu(softnet_data, cpu);
 
-	local_irq_save(flags);
-
-	rps_lock(sd);
+	rps_lock_irqsave(sd, &flags);
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
|
|||
enqueue:
|
||||
__skb_queue_tail(&sd->input_pkt_queue, skb);
|
||||
input_queue_tail_incr_save(sd, qtail);
|
||||
rps_unlock(sd);
|
||||
local_irq_restore(flags);
|
||||
rps_unlock_irq_restore(sd, &flags);
|
||||
return NET_RX_SUCCESS;
|
||||
}
|
||||
|
||||
/* Schedule NAPI for backlog device
|
||||
* We can use non atomic operation since we own the queue lock
|
||||
*/
|
||||
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
|
||||
if (!rps_ipi_queued(sd))
|
||||
____napi_schedule(sd, &sd->backlog);
|
||||
}
|
||||
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
|
||||
napi_schedule_rps(sd);
|
||||
goto enqueue;
|
||||
}
|
||||
|
||||
drop:
|
||||
sd->dropped++;
|
||||
rps_unlock(sd);
|
||||
|
||||
local_irq_restore(flags);
|
||||
rps_unlock_irq_restore(sd, &flags);
|
||||
|
||||
atomic_long_inc(&skb->dev->rx_dropped);
|
||||
kfree_skb(skb);
|
||||
|
@@ -4796,7 +4810,6 @@ static int netif_rx_internal(struct sk_buff *skb)
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
-		preempt_disable();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -4806,78 +4819,67 @@ static int netif_rx_internal(struct sk_buff *skb)
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
 		rcu_read_unlock();
-		preempt_enable();
 	} else
 #endif
 	{
 		unsigned int qtail;
 
-		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
-		put_cpu();
+		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
 	}
 	return ret;
 }
 
+/**
+ * __netif_rx - Slightly optimized version of netif_rx
+ * @skb: buffer to post
+ *
+ * This behaves as netif_rx except that it does not disable bottom halves.
+ * As a result this function may only be invoked from the interrupt context
+ * (either hard or soft interrupt).
+ */
+int __netif_rx(struct sk_buff *skb)
+{
+	int ret;
+
+	lockdep_assert_once(hardirq_count() | softirq_count());
+
+	trace_netif_rx_entry(skb);
+	ret = netif_rx_internal(skb);
+	trace_netif_rx_exit(ret);
+	return ret;
+}
+EXPORT_SYMBOL(__netif_rx);
+
 /**
  * netif_rx - post buffer to the network code
  * @skb: buffer to post
  *
  * This function receives a packet from a device driver and queues it for
- * the upper (protocol) levels to process.  It always succeeds.  The buffer
- * may be dropped during processing for congestion control or by the
- * protocol layers.
+ * the upper (protocol) levels to process via the backlog NAPI device. It
+ * always succeeds. The buffer may be dropped during processing for
+ * congestion control or by the protocol layers.
+ * The network buffer is passed via the backlog NAPI device. Modern NIC
+ * driver should use NAPI and GRO.
+ * This function can used from any context.
 *
 * return values:
 * NET_RX_SUCCESS	(no congestion)
 * NET_RX_DROP     (packet was dropped)
 *
 */
-
 int netif_rx(struct sk_buff *skb)
 {
 	int ret;
 
+	local_bh_disable();
 	trace_netif_rx_entry(skb);
 
 	ret = netif_rx_internal(skb);
 	trace_netif_rx_exit(ret);
 
+	local_bh_enable();
 	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
 
-int netif_rx_ni(struct sk_buff *skb)
-{
-	int err;
-
-	trace_netif_rx_ni_entry(skb);
-
-	preempt_disable();
-	err = netif_rx_internal(skb);
-	if (local_softirq_pending())
-		do_softirq();
-	preempt_enable();
-	trace_netif_rx_ni_exit(err);
-
-	return err;
-}
-EXPORT_SYMBOL(netif_rx_ni);
-
-int netif_rx_any_context(struct sk_buff *skb)
-{
-	/*
-	 * If invoked from contexts which do not invoke bottom half
-	 * processing either at return from interrupt or when softrqs are
-	 * reenabled, use netif_rx_ni() which invokes bottomhalf processing
-	 * directly.
-	 */
-	if (in_interrupt())
-		return netif_rx(skb);
-	else
-		return netif_rx_ni(skb);
-}
-EXPORT_SYMBOL(netif_rx_any_context);
-
 static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -5650,8 +5652,7 @@ static void flush_backlog(struct work_struct *work)
 	local_bh_disable();
 	sd = this_cpu_ptr(&softnet_data);
 
-	local_irq_disable();
-	rps_lock(sd);
+	rps_lock_irq_disable(sd);
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5659,8 +5660,7 @@ static void flush_backlog(struct work_struct *work)
 			input_queue_head_incr(sd);
 		}
 	}
-	rps_unlock(sd);
-	local_irq_enable();
+	rps_unlock_irq_enable(sd);
 
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5678,16 +5678,14 @@ static bool flush_required(int cpu)
 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
 	bool do_flush;
 
-	local_irq_disable();
-	rps_lock(sd);
+	rps_lock_irq_disable(sd);
 
 	/* as insertion into process_queue happens with the rps lock held,
 	 * process_queue access may race only with dequeue
 	 */
 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
 		   !skb_queue_empty_lockless(&sd->process_queue);
-	rps_unlock(sd);
-	local_irq_enable();
+	rps_unlock_irq_enable(sd);
 
 	return do_flush;
 #endif
@@ -5802,8 +5800,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
 
 		}
 
-		local_irq_disable();
-		rps_lock(sd);
+		rps_lock_irq_disable(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
@@ -5819,8 +5816,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
 						   &sd->process_queue);
 		}
-		rps_unlock(sd);
-		local_irq_enable();
+		rps_unlock_irq_enable(sd);
 	}
 
 	return work;