net: ena: aggregate stats increase into a function

Introduce an ena_increase_stat() function to increase a statistic by a
given amount.
The function includes the
    - lock acquire (on 32bit machines)
    - stat increase
    - lock release (on 32bit machines)

line sequence that is ubiquitous across the driver.
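
For context, u64_stats_sync compiles away on 64bit kernels and only does
real synchronization on 32bit kernels, where a 64bit counter cannot be
read atomically. A minimal sketch of both sides of that contract follows;
the helper names stat_add/stat_read and the reader loop are illustrative
only and are not part of this patch:

    #include <linux/u64_stats_sync.h>

    /* Writer side - what ena_increase_stat() boils down to. On 64bit
     * kernels the begin/end calls are no-ops.
     */
    static void stat_add(u64 *statp, u64 cnt, struct u64_stats_sync *syncp)
    {
            u64_stats_update_begin(syncp);
            (*statp) += cnt;
            u64_stats_update_end(syncp);
    }

    /* Reader side (illustrative): retry until the snapshot did not race
     * with a writer, so a 32bit reader never sees a torn 64bit value.
     */
    static u64 stat_read(const u64 *statp, struct u64_stats_sync *syncp)
    {
            unsigned int start;
            u64 val;

            do {
                    start = u64_stats_fetch_begin(syncp);
                    val = *statp;
            } while (u64_stats_fetch_retry(syncp, start));

            return val;
    }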

The function increases a single stat at a time. Call sites that increase
several stats together were left open-coded, to avoid calling the function
once per stat, which reads poorly and might hurt performance.
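
As an illustration of that trade-off (the stat names and variables below
are only an example, not a specific call site changed by this patch), two
counters updated together stay under a single begin/end pair, whereas
converting them would take and release the seqcount twice:

    /* Left open-coded: one lock round-trip covers both counters
     * (ring and total_len are assumed to be in scope).
     */
    u64_stats_update_begin(&ring->syncp);
    ring->rx_stats.bytes += total_len;
    ring->rx_stats.cnt++;
    u64_stats_update_end(&ring->syncp);

    /* The converted form would pay for the synchronization twice: */
    ena_increase_stat(&ring->rx_stats.bytes, total_len, &ring->syncp);
    ena_increase_stat(&ring->rx_stats.cnt, 1, &ring->syncp);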

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 89dd735e8c (parent 1e5847395e)
Author: Shay Agroskin <shayagr@amazon.com>
Date:   2020-12-08 20:02:04 +02:00
Committer: Jakub Kicinski <kuba@kernel.org>

1 file changed, 68 insertions(+), 99 deletions(-)

@@ -80,6 +80,15 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					     int first_index, int count);
 
+/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+static void ena_increase_stat(u64 *statp, u64 cnt,
+			      struct u64_stats_sync *syncp)
+{
+	u64_stats_update_begin(syncp);
+	(*statp) += cnt;
+	u64_stats_update_end(syncp);
+}
+
 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);
@@ -92,9 +101,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
 		return;
 
 	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
-	u64_stats_update_begin(&adapter->syncp);
-	adapter->dev_stats.tx_timeout++;
-	u64_stats_update_end(&adapter->syncp);
+	ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
 
 	netif_err(adapter, tx_err, dev, "Transmit time out\n");
 }
@@ -154,9 +161,8 @@ static int ena_xmit_common(struct net_device *dev,
 	if (unlikely(rc)) {
 		netif_err(adapter, tx_queued, dev,
 			  "Failed to prepare tx bufs\n");
-		u64_stats_update_begin(&ring->syncp);
-		ring->tx_stats.prepare_ctx_err++;
-		u64_stats_update_end(&ring->syncp);
+		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+				  &ring->syncp);
 		if (rc != -ENOMEM) {
 			adapter->reset_reason =
 				ENA_REGS_RESET_DRIVER_INVALID_STATE;
@@ -264,9 +270,8 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
 	return 0;
 
 error_report_dma_error:
-	u64_stats_update_begin(&xdp_ring->syncp);
-	xdp_ring->tx_stats.dma_mapping_err++;
-	u64_stats_update_end(&xdp_ring->syncp);
+	ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
+			  &xdp_ring->syncp);
 	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
 
 	xdp_return_frame_rx_napi(tx_info->xdpf);
@@ -320,9 +325,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
 	 * has a mb
 	 */
 	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
-	u64_stats_update_begin(&xdp_ring->syncp);
-	xdp_ring->tx_stats.doorbells++;
-	u64_stats_update_end(&xdp_ring->syncp);
+	ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, &xdp_ring->syncp);
 
 	return NETDEV_TX_OK;
@@ -369,9 +372,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
 		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
 	}
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	(*xdp_stat)++;
-	u64_stats_update_end(&rx_ring->syncp);
+	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
 
 out:
 	rcu_read_unlock();
@@ -924,9 +925,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 	page = alloc_page(gfp);
 	if (unlikely(!page)) {
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.page_alloc_fail++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+				  &rx_ring->syncp);
 		return -ENOMEM;
 	}
@@ -936,9 +936,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			    DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.dma_mapping_err++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
+				  &rx_ring->syncp);
 
 		__free_page(page);
 		return -EIO;
@@ -1011,9 +1010,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 	}
 
 	if (unlikely(i < num)) {
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.refil_partial++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
+				  &rx_ring->syncp);
 		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
 			   "Refilled rx qid %d with only %d buffers (from %d)\n",
 			   rx_ring->qid, i, num);
@@ -1189,9 +1187,7 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
 		  "Invalid req_id: %hu\n",
 		  req_id);
 
-	u64_stats_update_begin(&ring->syncp);
-	ring->tx_stats.bad_req_id++;
-	u64_stats_update_end(&ring->syncp);
+	ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
 
 	/* Trigger device reset */
 	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
@@ -1302,9 +1298,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 		if (netif_tx_queue_stopped(txq) && above_thresh &&
 		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
 			netif_tx_wake_queue(txq);
-			u64_stats_update_begin(&tx_ring->syncp);
-			tx_ring->tx_stats.queue_wakeup++;
-			u64_stats_update_end(&tx_ring->syncp);
+			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
+					  &tx_ring->syncp);
 		}
 		__netif_tx_unlock(txq);
 	}
@@ -1323,9 +1318,8 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
						 rx_ring->rx_copybreak);
 
 	if (unlikely(!skb)) {
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.skb_alloc_fail++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+				  &rx_ring->syncp);
 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 			  "Failed to allocate skb. frags: %d\n", frags);
 		return NULL;
@@ -1453,9 +1447,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
 		     (ena_rx_ctx->l3_csum_err))) {
 		/* ipv4 checksum error */
 		skb->ip_summed = CHECKSUM_NONE;
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.bad_csum++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+				  &rx_ring->syncp);
 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 			  "RX IPv4 header checksum error\n");
 		return;
@@ -1466,9 +1459,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
 		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
 			/* TCP/UDP checksum error */
-			u64_stats_update_begin(&rx_ring->syncp);
-			rx_ring->rx_stats.bad_csum++;
-			u64_stats_update_end(&rx_ring->syncp);
+			ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+					  &rx_ring->syncp);
 			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 				  "RX L4 checksum error\n");
 			skb->ip_summed = CHECKSUM_NONE;
@@ -1477,13 +1469,11 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
 		if (likely(ena_rx_ctx->l4_csum_checked)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			u64_stats_update_begin(&rx_ring->syncp);
-			rx_ring->rx_stats.csum_good++;
-			u64_stats_update_end(&rx_ring->syncp);
+			ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
+					  &rx_ring->syncp);
 		} else {
-			u64_stats_update_begin(&rx_ring->syncp);
-			rx_ring->rx_stats.csum_unchecked++;
-			u64_stats_update_end(&rx_ring->syncp);
+			ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
+					  &rx_ring->syncp);
 			skb->ip_summed = CHECKSUM_NONE;
 		}
 	} else {
@@ -1675,14 +1665,12 @@ error:
 	adapter = netdev_priv(rx_ring->netdev);
 
 	if (rc == -ENOSPC) {
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.bad_desc_num++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+				  &rx_ring->syncp);
 		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
 	} else {
-		u64_stats_update_begin(&rx_ring->syncp);
-		rx_ring->rx_stats.bad_req_id++;
-		u64_stats_update_end(&rx_ring->syncp);
+		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+				  &rx_ring->syncp);
 		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
 	}
@@ -1743,9 +1731,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				       tx_ring->smoothed_interval,
				       true);
 
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->tx_stats.unmask_interrupt++;
-	u64_stats_update_end(&tx_ring->syncp);
+	ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
+			  &tx_ring->syncp);
 
 	/* It is a shared MSI-X.
 	 * Tx and Rx CQ have pointer to it.
@@ -2552,9 +2539,8 @@ static int ena_up(struct ena_adapter *adapter)
 	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
 		netif_carrier_on(adapter->netdev);
 
-	u64_stats_update_begin(&adapter->syncp);
-	adapter->dev_stats.interface_up++;
-	u64_stats_update_end(&adapter->syncp);
+	ena_increase_stat(&adapter->dev_stats.interface_up, 1,
+			  &adapter->syncp);
 
 	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2592,9 +2578,8 @@ static void ena_down(struct ena_adapter *adapter)
 	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 
-	u64_stats_update_begin(&adapter->syncp);
-	adapter->dev_stats.interface_down++;
-	u64_stats_update_end(&adapter->syncp);
+	ena_increase_stat(&adapter->dev_stats.interface_down, 1,
+			  &adapter->syncp);
 
 	netif_carrier_off(adapter->netdev);
 	netif_tx_disable(adapter->netdev);
@@ -2822,15 +2807,12 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
 	    (header_len < tx_ring->tx_max_header_size))
 		return 0;
 
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->tx_stats.linearize++;
-	u64_stats_update_end(&tx_ring->syncp);
+	ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
 
 	rc = skb_linearize(skb);
 	if (unlikely(rc)) {
-		u64_stats_update_begin(&tx_ring->syncp);
-		tx_ring->tx_stats.linearize_failed++;
-		u64_stats_update_end(&tx_ring->syncp);
+		ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
+				  &tx_ring->syncp);
 	}
 
 	return rc;
@@ -2870,9 +2852,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
						tx_ring->push_buf_intermediate_buf);
 		*header_len = push_len;
 		if (unlikely(skb->data != *push_hdr)) {
-			u64_stats_update_begin(&tx_ring->syncp);
-			tx_ring->tx_stats.llq_buffer_copy++;
-			u64_stats_update_end(&tx_ring->syncp);
+			ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
+					  &tx_ring->syncp);
 
 			delta = push_len - skb_head_len;
 		}
@@ -2929,9 +2910,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
 	return 0;
 
 error_report_dma_error:
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->tx_stats.dma_mapping_err++;
-	u64_stats_update_end(&tx_ring->syncp);
+	ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+			  &tx_ring->syncp);
 	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
 
 	tx_info->skb = NULL;
@@ -3008,9 +2988,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			  __func__, qid);
 
 		netif_tx_stop_queue(txq);
-		u64_stats_update_begin(&tx_ring->syncp);
-		tx_ring->tx_stats.queue_stop++;
-		u64_stats_update_end(&tx_ring->syncp);
+		ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
+				  &tx_ring->syncp);
 
 		/* There is a rare condition where this function decide to
 		 * stop the queue but meanwhile clean_tx_irq updates
if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH)) { ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq); netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp); ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
tx_ring->tx_stats.queue_wakeup++; &tx_ring->syncp);
u64_stats_update_end(&tx_ring->syncp);
} }
} }
@@ -3036,9 +3014,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * has a mb
 		 */
 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
-		u64_stats_update_begin(&tx_ring->syncp);
-		tx_ring->tx_stats.doorbells++;
-		u64_stats_update_end(&tx_ring->syncp);
+		ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
+				  &tx_ring->syncp);
 	}
 
 	return NETDEV_TX_OK;
@@ -3673,9 +3650,8 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
 		rc = -EIO;
 	}
 
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->tx_stats.missed_tx += missed_tx;
-	u64_stats_update_end(&tx_ring->syncp);
+	ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
+			  &tx_ring->syncp);
 
 	return rc;
 }
@@ -3758,9 +3734,8 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
 			rx_ring->empty_rx_queue++;
 
 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
-				u64_stats_update_begin(&rx_ring->syncp);
-				rx_ring->rx_stats.empty_rx_ring++;
-				u64_stats_update_end(&rx_ring->syncp);
+				ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
+						  &rx_ring->syncp);
 
 				netif_err(adapter, drv, adapter->netdev,
 					  "Trigger refill for ring %d\n", i);
@@ -3790,9 +3765,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
 		netif_err(adapter, drv, adapter->netdev,
 			  "Keep alive watchdog timeout.\n");
-		u64_stats_update_begin(&adapter->syncp);
-		adapter->dev_stats.wd_expired++;
-		u64_stats_update_end(&adapter->syncp);
+		ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
+				  &adapter->syncp);
 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 	}
@@ -3803,9 +3777,8 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
 	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
 		netif_err(adapter, drv, adapter->netdev,
 			  "ENA admin queue is not in running state!\n");
-		u64_stats_update_begin(&adapter->syncp);
-		adapter->dev_stats.admin_q_pause++;
-		u64_stats_update_end(&adapter->syncp);
+		ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
+				  &adapter->syncp);
 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 	}
@@ -4441,9 +4414,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
 	struct pci_dev *pdev = to_pci_dev(dev_d);
 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
 
-	u64_stats_update_begin(&adapter->syncp);
-	adapter->dev_stats.suspend++;
-	u64_stats_update_end(&adapter->syncp);
+	ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);
 
 	rtnl_lock();
 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
@@ -4464,9 +4435,7 @@ static int __maybe_unused ena_resume(struct device *dev_d)
 	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
 	int rc;
 
-	u64_stats_update_begin(&adapter->syncp);
-	adapter->dev_stats.resume++;
-	u64_stats_update_end(&adapter->syncp);
+	ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);
 
 	rtnl_lock();
 	rc = ena_restore_device(adapter);