Merge branch 'ena-next'

Sameeh Jubran says:

====================
Extending the ena driver to support new features and enhance performance

This patchset introduces the following:

* add support for changing the inline header size (max_header_size) for
  applications with overlay and nested headers
* enable automatic fallback to polling mode for the admin queue when an
  interrupt is not available or is missed
* add a good checksum counter to the Rx ethtool statistics
* update ena.txt
* some minor code clean-up
* some performance enhancements in the doorbell calculations

Differences from V1:

* net: ena: add handling of llq max tx burst size (1/11):
 * fixed christmas tree issue

* net: ena: ethtool: add extra properties retrieval via get_priv_flags (2/11):
 * replaced snprintf with strlcpy
 * dropped confusing error message
 * added more details to the commit message
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-06-03 13:30:38 -07:00
commit e2821fc86a
9 changed files with 375 additions and 101 deletions

View File

@@ -73,7 +73,7 @@ operation.
 AQ is used for submitting management commands, and the
 results/responses are reported asynchronously through ACQ.
-ENA introduces a very small set of management commands with room for
+ENA introduces a small set of management commands with room for
 vendor-specific extensions. Most of the management operations are
 framed in a generic Get/Set feature command.
@@ -202,11 +202,14 @@ delay value to each level.
 The user can enable/disable adaptive moderation, modify the interrupt
 delay table and restore its default values through sysfs.

+RX copybreak:
+=============
 The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
 and can be configured by the ETHTOOL_STUNABLE command of the
 SIOCETHTOOL ioctl.

 SKB:
+====
 The driver-allocated SKB for frames received from Rx handling using
 NAPI context. The allocation method depends on the size of the packet.
 If the frame length is larger than rx_copybreak, napi_get_frags()
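The ETHTOOL_STUNABLE path described in the RX copybreak section above can be
driven from user space. A minimal illustrative sketch (not part of this patch;
interface name and error handling are placeholders) that sets rx_copybreak
through the SIOCETHTOOL ioctl, which for this driver ends up in
ena_set_tunable():

    #include <string.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* Set rx_copybreak via ETHTOOL_STUNABLE; returns 0 on success. */
    static int set_rx_copybreak(const char *ifname, uint32_t bytes)
    {
        struct {
            struct ethtool_tunable hdr;   /* header, then tunable payload */
            uint32_t value;
        } req = {
            .hdr = {
                .cmd     = ETHTOOL_STUNABLE,
                .id      = ETHTOOL_RX_COPYBREAK,
                .type_id = ETHTOOL_TUNABLE_U32,
                .len     = sizeof(uint32_t),
            },
            .value = bytes,
        };
        struct ifreq ifr = {0};
        int fd, rc;

        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&req;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
            return -1;
        rc = ioctl(fd, SIOCETHTOOL, &ifr);  /* dispatched to the driver's set_tunable */
        close(fd);
        return rc;
    }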

View File

@@ -32,6 +32,8 @@
 #ifndef _ENA_ADMIN_H_
 #define _ENA_ADMIN_H_

+#define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32
+#define ENA_ADMIN_EXTRA_PROPERTIES_COUNT 32
+
 enum ena_admin_aq_opcode {
     ENA_ADMIN_CREATE_SQ = 1,
@@ -60,6 +62,8 @@ enum ena_admin_aq_feature_id {
     ENA_ADMIN_MAX_QUEUES_NUM = 2,
     ENA_ADMIN_HW_HINTS = 3,
     ENA_ADMIN_LLQ = 4,
+    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS = 5,
+    ENA_ADMIN_EXTRA_PROPERTIES_FLAGS = 6,
     ENA_ADMIN_RSS_HASH_FUNCTION = 10,
     ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
@@ -524,6 +528,11 @@ struct ena_admin_feature_llq_desc {
     /* the stride control the driver selected to use */
     u16 descriptors_stride_ctrl_enabled;
+
+    /* Maximum size in bytes taken by llq entries in a single tx burst.
+     * Set to 0 when there is no such limit.
+     */
+    u32 max_tx_burst_size;
 };

 struct ena_admin_queue_feature_desc {
@@ -555,6 +564,14 @@ struct ena_admin_set_feature_mtu_desc {
     u32 mtu;
 };

+struct ena_admin_get_extra_properties_strings_desc {
+    u32 count;
+};
+
+struct ena_admin_get_extra_properties_flags_desc {
+    u32 flags;
+};
+
 struct ena_admin_set_feature_host_attr_desc {
     /* host OS info base address in OS memory. host info is 4KB of
      * physically contiguous
@@ -859,6 +876,10 @@ struct ena_admin_get_feat_resp {
         struct ena_admin_feature_intr_moder_desc intr_moderation;

         struct ena_admin_ena_hw_hints hw_hints;
+
+        struct ena_admin_get_extra_properties_strings_desc extra_properties_strings;
+
+        struct ena_admin_get_extra_properties_flags_desc extra_properties_flags;
     } u;
 };

View File

@@ -115,7 +115,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
                      GFP_KERNEL);

     if (!sq->entries) {
-        pr_err("memory allocation failed");
+        pr_err("memory allocation failed\n");
         return -ENOMEM;
     }
@@ -137,7 +137,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
                      GFP_KERNEL);

     if (!cq->entries) {
-        pr_err("memory allocation failed");
+        pr_err("memory allocation failed\n");
         return -ENOMEM;
     }
@@ -160,7 +160,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
                        GFP_KERNEL);

     if (!aenq->entries) {
-        pr_err("memory allocation failed");
+        pr_err("memory allocation failed\n");
         return -ENOMEM;
     }
@@ -285,7 +285,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
     queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);

     if (unlikely(!queue->comp_ctx)) {
-        pr_err("memory allocation failed");
+        pr_err("memory allocation failed\n");
         return -ENOMEM;
     }
@@ -356,7 +356,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
         }

         if (!io_sq->desc_addr.virt_addr) {
-            pr_err("memory allocation failed");
+            pr_err("memory allocation failed\n");
             return -ENOMEM;
         }
     }
@@ -382,7 +382,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
             devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

         if (!io_sq->bounce_buf_ctrl.base_buffer) {
-            pr_err("bounce buffer memory allocation failed");
+            pr_err("bounce buffer memory allocation failed\n");
             return -ENOMEM;
         }
@@ -396,6 +396,10 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                0x0, io_sq->llq_info.desc_list_entry_size);
         io_sq->llq_buf_ctrl.descs_left_in_line =
             io_sq->llq_info.descs_num_before_header;
+
+        if (io_sq->llq_info.max_entries_in_tx_burst > 0)
+            io_sq->entries_in_tx_burst_left =
+                io_sq->llq_info.max_entries_in_tx_burst;
     }

     io_sq->tail = 0;
@@ -436,7 +440,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
     }

     if (!io_cq->cdesc_addr.virt_addr) {
-        pr_err("memory allocation failed");
+        pr_err("memory allocation failed\n");
         return -ENOMEM;
     }
@@ -727,6 +731,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                 supported_feat, llq_info->descs_num_before_header);
     }

+    llq_info->max_entries_in_tx_burst =
+        (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+
     rc = ena_com_set_llq(ena_dev);
     if (rc)
         pr_err("Cannot set LLQ configuration: %d\n", rc);
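For illustration (numbers assumed, not taken from this patch): a device
reporting max_tx_burst_size = 1024 with a 128-byte LLQ ring entry yields
max_entries_in_tx_burst = 1024 / 128 = 8, i.e. at most eight LLQ entries may
be written between two doorbells; max_tx_burst_size = 0 makes the result 0,
which the is_llq_max_tx_burst_exists() helper below treats as "no limit".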
@@ -755,16 +762,26 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
         admin_queue->stats.no_completion++;
         spin_unlock_irqrestore(&admin_queue->q_lock, flags);

-        if (comp_ctx->status == ENA_CMD_COMPLETED)
-            pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
-                   comp_ctx->cmd_opcode);
-        else
-            pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+        if (comp_ctx->status == ENA_CMD_COMPLETED) {
+            pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+                   comp_ctx->cmd_opcode,
+                   admin_queue->auto_polling ? "ON" : "OFF");
+            /* Check if fallback to polling is enabled */
+            if (admin_queue->auto_polling)
+                admin_queue->polling = true;
+        } else {
+            pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
                    comp_ctx->cmd_opcode, comp_ctx->status);
+        }

-        admin_queue->running_state = false;
-        ret = -ETIME;
-        goto err;
+        /* Check if shifted to polling mode.
+         * This will happen if there is a completion without an interrupt
+         * and autopolling mode is enabled. Continuing normal execution in such case
+         */
+        if (!admin_queue->polling) {
+            admin_queue->running_state = false;
+            ret = -ETIME;
+            goto err;
+        }
     }

     ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
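The fallback above only engages if a driver opts in. A minimal sketch
(hypothetical call site, using the setter added further down in this file):

    /* During device init: let admin commands that complete without a
     * received MSI-X interrupt degrade to polling instead of -ETIME.
     */
    ena_com_set_admin_auto_polling_mode(ena_dev, true);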
@@ -822,7 +839,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
     }

     if (read_resp->reg_off != offset) {
-        pr_err("Read failure: wrong offset provided");
+        pr_err("Read failure: wrong offset provided\n");
         ret = ENA_MMIO_READ_TIMEOUT;
     } else {
         ret = read_resp->reg_val;
@@ -1643,6 +1660,12 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
     ena_dev->admin_queue.polling = polling;
 }

+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+                     bool polling)
+{
+    ena_dev->admin_queue.auto_polling = polling;
+}
+
 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 {
     struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1870,6 +1893,62 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
     return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
 }

+int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
+{
+    struct ena_admin_get_feat_resp resp;
+    struct ena_extra_properties_strings *extra_properties_strings =
+        &ena_dev->extra_properties_strings;
+    u32 rc;
+
+    extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
+        ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
+
+    extra_properties_strings->virt_addr =
+        dma_alloc_coherent(ena_dev->dmadev,
+                   extra_properties_strings->size,
+                   &extra_properties_strings->dma_addr,
+                   GFP_KERNEL);
+    if (unlikely(!extra_properties_strings->virt_addr)) {
+        pr_err("Failed to allocate extra properties strings\n");
+        return 0;
+    }
+
+    rc = ena_com_get_feature_ex(ena_dev, &resp,
+                    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
+                    extra_properties_strings->dma_addr,
+                    extra_properties_strings->size);
+    if (rc) {
+        pr_debug("Failed to get extra properties strings\n");
+        goto err;
+    }
+
+    return resp.u.extra_properties_strings.count;
+err:
+    ena_com_delete_extra_properties_strings(ena_dev);
+    return 0;
+}
+
+void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
+{
+    struct ena_extra_properties_strings *extra_properties_strings =
+        &ena_dev->extra_properties_strings;
+
+    if (extra_properties_strings->virt_addr) {
+        dma_free_coherent(ena_dev->dmadev,
+                  extra_properties_strings->size,
+                  extra_properties_strings->virt_addr,
+                  extra_properties_strings->dma_addr);
+        extra_properties_strings->virt_addr = NULL;
+    }
+}
+
+int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
+                       struct ena_admin_get_feat_resp *resp)
+{
+    return ena_com_get_feature(ena_dev, resp,
+                   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS);
+}
+
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
                   struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -2913,8 +2992,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
                 struct ena_admin_feature_llq_desc *llq_features,
                 struct ena_llq_configurations *llq_default_cfg)
 {
+    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
     int rc;
-    int size;

     if (!llq_features->max_llq_num) {
         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -2925,12 +3004,10 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
     if (rc)
         return rc;

-    /* Validate the descriptor is not too big */
-    size = ena_dev->tx_max_header_size;
-    size += ena_dev->llq_info.descs_num_before_header *
-        sizeof(struct ena_eth_io_tx_desc);
+    ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
+        (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

-    if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+    if (unlikely(ena_dev->tx_max_header_size == 0)) {
         pr_err("the size of the LLQ entry is smaller than needed\n");
         return -EINVAL;
     }

View File

@@ -159,6 +159,7 @@ struct ena_com_llq_info {
     u16 desc_list_entry_size;
     u16 descs_num_before_header;
     u16 descs_per_entry;
+    u16 max_entries_in_tx_burst;
 };

 struct ena_com_io_cq {
@@ -238,6 +239,7 @@ struct ena_com_io_sq {
     u8 phase;
     u8 desc_entry_size;
     u8 dma_addr_bits;
+    u16 entries_in_tx_burst_left;
 } ____cacheline_aligned;

 struct ena_com_admin_cq {
@@ -281,6 +283,9 @@ struct ena_com_admin_queue {
     /* Indicate if the admin queue should poll for completion */
     bool polling;

+    /* Define if fallback to polling mode should occur */
+    bool auto_polling;
+
     u16 curr_cmd_id;

     /* Indicate that the ena was initialized and can
@@ -345,6 +350,12 @@ struct ena_host_attribute {
     dma_addr_t host_info_dma_addr;
 };

+struct ena_extra_properties_strings {
+    u8 *virt_addr;
+    dma_addr_t dma_addr;
+    u32 size;
+};
+
 /* Each ena_dev is a PCI function. */
 struct ena_com_dev {
     struct ena_com_admin_queue admin_queue;
@@ -373,6 +384,7 @@ struct ena_com_dev {
     struct ena_intr_moder_entry *intr_moder_tbl;

     struct ena_com_llq_info llq_info;
+    struct ena_extra_properties_strings extra_properties_strings;
 };

 struct ena_com_dev_get_features_ctx {
@@ -536,6 +548,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
  */
 bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);

+/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the autopolling mode.
+ * If autopolling is on:
+ * In case of missing interrupt when data is available switch to polling.
+ */
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+                     bool polling);
+
 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
  * @ena_dev: ENA communication layer struct
  *
@@ -594,6 +617,31 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev);
 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
                 struct ena_admin_get_feat_resp *resp);

+/* ena_com_extra_properties_strings_init - Initialize the extra properties strings buffer.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the extra properties strings buffer.
+ */
+int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_extra_properties_strings - Free the extra properties strings buffer.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated extra properties strings buffer.
+ */
+void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_extra_properties_flags - Retrieve extra properties flags.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Extra properties flags.
+ *
+ * Retrieve the extra properties flags.
+ *
+ * @return - 0 on Success negative value otherwise.
+ */
+int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
+                       struct ena_admin_get_feat_resp *resp);
+
 /* ena_com_get_dma_width - Retrieve physical dma address width the device
  * supports.
  * @ena_dev: ENA communication layer struct
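Taken together, the three extra-properties declarations above imply a simple
lifecycle, which the ena_netdev.c and ena_ethtool.c changes later in this diff
follow. A hedged sketch (driver-side fragments, error handling elided):

    /* probe: fetch the strings once; returns the string count, or 0 on failure */
    adapter->ena_extra_properties_count =
        ena_com_extra_properties_strings_init(ena_dev);

    /* ethtool ETH_SS_PRIV_FLAGS path: report one flag bit per string */
    rc = ena_com_get_extra_properties_flags(ena_dev, &get_resp);
    if (!rc)
        flags = get_resp.u.extra_properties_flags.flags;

    /* remove/teardown: release the DMA-coherent strings buffer */
    ena_com_delete_extra_properties_strings(ena_dev);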

View File

@@ -82,6 +82,17 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
     dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
     dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

+    if (is_llq_max_tx_burst_exists(io_sq)) {
+        if (unlikely(!io_sq->entries_in_tx_burst_left)) {
+            pr_err("Error: trying to send more packets than tx burst allows\n");
+            return -ENOSPC;
+        }
+
+        io_sq->entries_in_tx_burst_left--;
+        pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+             io_sq->qid, io_sq->entries_in_tx_burst_left);
+    }
+
     /* Make sure everything was written into the bounce buffer before
      * writing the bounce buffer to the device
      */
@@ -274,23 +285,6 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
     return count;
 }

-static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
-                         struct ena_com_tx_ctx *ena_tx_ctx)
-{
-    int rc;
-
-    if (ena_tx_ctx->meta_valid) {
-        rc = memcmp(&io_sq->cached_tx_meta,
-                &ena_tx_ctx->ena_meta,
-                sizeof(struct ena_com_tx_meta));
-
-        if (unlikely(rc != 0))
-            return true;
-    }
-
-    return false;
-}
-
 static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                          struct ena_com_tx_ctx *ena_tx_ctx)
 {

View File

@@ -125,8 +125,55 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
     return ena_com_free_desc(io_sq) > temp;
 }

+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+                         struct ena_com_tx_ctx *ena_tx_ctx)
+{
+    if (!ena_tx_ctx->meta_valid)
+        return false;
+
+    return !!memcmp(&io_sq->cached_tx_meta,
+            &ena_tx_ctx->ena_meta,
+            sizeof(struct ena_com_tx_meta));
+}
+
+static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
+{
+    return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
+           io_sq->llq_info.max_entries_in_tx_burst > 0;
+}
+
+static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+                          struct ena_com_tx_ctx *ena_tx_ctx)
+{
+    struct ena_com_llq_info *llq_info;
+    int descs_after_first_entry;
+    int num_entries_needed = 1;
+    u16 num_descs;
+
+    if (!is_llq_max_tx_burst_exists(io_sq))
+        return false;
+
+    llq_info = &io_sq->llq_info;
+    num_descs = ena_tx_ctx->num_bufs;
+
+    if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+        ++num_descs;
+
+    if (num_descs > llq_info->descs_num_before_header) {
+        descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
+        num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
+                           llq_info->descs_per_entry);
+    }
+
+    pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+         num_descs, num_entries_needed);
+
+    return num_entries_needed > io_sq->entries_in_tx_burst_left;
+}
+
 static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
+    u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
     u16 tail = io_sq->tail;

     pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
@@ -134,6 +181,12 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
     writel(tail, io_sq->db_addr);

+    if (is_llq_max_tx_burst_exists(io_sq)) {
+        pr_debug("reset available entries in tx burst for queue %d to %d\n",
+             io_sq->qid, max_entries_in_tx_burst);
+        io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+    }
+
     return 0;
 }
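A worked example for the helpers above (numbers assumed for illustration): a
packet with num_bufs = 5 whose TX meta descriptor changed gives num_descs = 6;
with descs_num_before_header = 2 and descs_per_entry = 4 it needs
num_entries_needed = 1 + DIV_ROUND_UP(6 - 2, 4) = 2 LLQ entries.
ena_com_is_doorbell_needed() therefore returns true only when fewer than two
entries remain in the current burst, and the doorbell write in
ena_com_write_sq_doorbell() then restores entries_in_tx_burst_left to
max_entries_in_tx_burst.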
@@ -142,15 +195,17 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
     u16 unreported_comp, head;
     bool need_update;

-    head = io_cq->head;
-    unreported_comp = head - io_cq->last_head_update;
-    need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
-    if (io_cq->cq_head_db_reg && need_update) {
-        pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
-             io_cq->qid, head);
-        writel(head, io_cq->cq_head_db_reg);
-        io_cq->last_head_update = head;
+    if (unlikely(io_cq->cq_head_db_reg)) {
+        head = io_cq->head;
+        unreported_comp = head - io_cq->last_head_update;
+        need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+        if (unlikely(need_update)) {
+            pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
+                 io_cq->qid, head);
+            writel(head, io_cq->cq_head_db_reg);
+            io_cq->last_head_update = head;
+        }
     }

     return 0;

View File

@@ -88,13 +88,14 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 static const struct ena_stats ena_stats_rx_strings[] = {
     ENA_STAT_RX_ENTRY(cnt),
     ENA_STAT_RX_ENTRY(bytes),
+    ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+    ENA_STAT_RX_ENTRY(csum_good),
     ENA_STAT_RX_ENTRY(refil_partial),
     ENA_STAT_RX_ENTRY(bad_csum),
     ENA_STAT_RX_ENTRY(page_alloc_fail),
     ENA_STAT_RX_ENTRY(skb_alloc_fail),
     ENA_STAT_RX_ENTRY(dma_mapping_err),
     ENA_STAT_RX_ENTRY(bad_desc_num),
-    ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
     ENA_STAT_RX_ENTRY(bad_req_id),
     ENA_STAT_RX_ENTRY(empty_rx_ring),
     ENA_STAT_RX_ENTRY(csum_unchecked),
@@ -197,15 +198,24 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
     ena_dev_admin_queue_stats(adapter, &data);
 }

+static int get_stats_sset_count(struct ena_adapter *adapter)
+{
+    return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+        + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
 int ena_get_sset_count(struct net_device *netdev, int sset)
 {
     struct ena_adapter *adapter = netdev_priv(netdev);

-    if (sset != ETH_SS_STATS)
+    switch (sset) {
+    case ETH_SS_STATS:
+        return get_stats_sset_count(adapter);
+    case ETH_SS_PRIV_FLAGS:
+        return adapter->ena_extra_properties_count;
+    default:
         return -EOPNOTSUPP;
-
-    return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
-        + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+    }
 }

 static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
@@ -247,26 +257,54 @@ static void ena_com_dev_strings(u8 **data)
     }
 }

-static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+static void get_stats_strings(struct ena_adapter *adapter, u8 *data)
 {
-    struct ena_adapter *adapter = netdev_priv(netdev);
     const struct ena_stats *ena_stats;
     int i;

-    if (sset != ETH_SS_STATS)
-        return;
-
     for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
         ena_stats = &ena_stats_global_strings[i];
         memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
         data += ETH_GSTRING_LEN;
     }

     ena_queue_strings(adapter, &data);
     ena_com_dev_strings(&data);
 }

+static void get_private_flags_strings(struct ena_adapter *adapter, u8 *data)
+{
+    struct ena_com_dev *ena_dev = adapter->ena_dev;
+    u8 *strings = ena_dev->extra_properties_strings.virt_addr;
+    int i;
+
+    if (unlikely(!strings)) {
+        adapter->ena_extra_properties_count = 0;
+        return;
+    }
+
+    for (i = 0; i < adapter->ena_extra_properties_count; i++) {
+        strlcpy(data, strings + ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN * i,
+            ETH_GSTRING_LEN);
+        data += ETH_GSTRING_LEN;
+    }
+}
+
+static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+    struct ena_adapter *adapter = netdev_priv(netdev);
+
+    switch (sset) {
+    case ETH_SS_STATS:
+        get_stats_strings(adapter, data);
+        break;
+    case ETH_SS_PRIV_FLAGS:
+        get_private_flags_strings(adapter, data);
+        break;
+    default:
+        break;
+    }
+}
+
 static int ena_get_link_ksettings(struct net_device *netdev,
                   struct ethtool_link_ksettings *link_ksettings)
 {
@@ -441,6 +479,7 @@ static void ena_get_drvinfo(struct net_device *dev,
     strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
     strlcpy(info->bus_info, pci_name(adapter->pdev),
         sizeof(info->bus_info));
+    info->n_priv_flags = adapter->ena_extra_properties_count;
 }

 static void ena_get_ringparam(struct net_device *netdev,
@@ -798,6 +837,20 @@ static int ena_set_tunable(struct net_device *netdev,
     return ret;
 }

+static u32 ena_get_priv_flags(struct net_device *netdev)
+{
+    struct ena_adapter *adapter = netdev_priv(netdev);
+    struct ena_com_dev *ena_dev = adapter->ena_dev;
+    struct ena_admin_get_feat_resp get_resp;
+    u32 rc;
+
+    rc = ena_com_get_extra_properties_flags(ena_dev, &get_resp);
+    if (!rc)
+        return get_resp.u.extra_properties_flags.flags;
+
+    return 0;
+}
+
 static const struct ethtool_ops ena_ethtool_ops = {
     .get_link_ksettings = ena_get_link_ksettings,
     .get_drvinfo = ena_get_drvinfo,
@@ -819,6 +872,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
     .get_channels = ena_get_channels,
     .get_tunable = ena_get_tunable,
     .set_tunable = ena_set_tunable,
+    .get_priv_flags = ena_get_priv_flags,
 };

 void ena_set_ethtool_ops(struct net_device *netdev)

View File

@@ -228,11 +228,11 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
     }

     size = sizeof(u16) * tx_ring->ring_size;
-    tx_ring->free_tx_ids = vzalloc_node(size, node);
-    if (!tx_ring->free_tx_ids) {
-        tx_ring->free_tx_ids = vzalloc(size);
-        if (!tx_ring->free_tx_ids)
-            goto err_free_tx_ids;
+    tx_ring->free_ids = vzalloc_node(size, node);
+    if (!tx_ring->free_ids) {
+        tx_ring->free_ids = vzalloc(size);
+        if (!tx_ring->free_ids)
+            goto err_tx_free_ids;
     }

     size = tx_ring->tx_max_header_size;
@@ -245,7 +245,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
     /* Req id ring for TX out of order completions */
     for (i = 0; i < tx_ring->ring_size; i++)
-        tx_ring->free_tx_ids[i] = i;
+        tx_ring->free_ids[i] = i;

     /* Reset tx statistics */
     memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
@@ -256,9 +256,9 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
     return 0;

 err_push_buf_intermediate_buf:
-    vfree(tx_ring->free_tx_ids);
-    tx_ring->free_tx_ids = NULL;
-err_free_tx_ids:
+    vfree(tx_ring->free_ids);
+    tx_ring->free_ids = NULL;
+err_tx_free_ids:
     vfree(tx_ring->tx_buffer_info);
     tx_ring->tx_buffer_info = NULL;
 err_tx_buffer_info:
@@ -278,8 +278,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
     vfree(tx_ring->tx_buffer_info);
     tx_ring->tx_buffer_info = NULL;

-    vfree(tx_ring->free_tx_ids);
-    tx_ring->free_tx_ids = NULL;
+    vfree(tx_ring->free_ids);
+    tx_ring->free_ids = NULL;

     vfree(tx_ring->push_buf_intermediate_buf);
     tx_ring->push_buf_intermediate_buf = NULL;
@@ -377,10 +377,10 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
     }

     size = sizeof(u16) * rx_ring->ring_size;
-    rx_ring->free_rx_ids = vzalloc_node(size, node);
-    if (!rx_ring->free_rx_ids) {
-        rx_ring->free_rx_ids = vzalloc(size);
-        if (!rx_ring->free_rx_ids) {
+    rx_ring->free_ids = vzalloc_node(size, node);
+    if (!rx_ring->free_ids) {
+        rx_ring->free_ids = vzalloc(size);
+        if (!rx_ring->free_ids) {
             vfree(rx_ring->rx_buffer_info);
             rx_ring->rx_buffer_info = NULL;
             return -ENOMEM;
@@ -389,7 +389,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
     /* Req id ring for receiving RX pkts out of order */
     for (i = 0; i < rx_ring->ring_size; i++)
-        rx_ring->free_rx_ids[i] = i;
+        rx_ring->free_ids[i] = i;

     /* Reset rx statistics */
     memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
@@ -415,8 +415,8 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
     vfree(rx_ring->rx_buffer_info);
     rx_ring->rx_buffer_info = NULL;

-    vfree(rx_ring->free_rx_ids);
-    rx_ring->free_rx_ids = NULL;
+    vfree(rx_ring->free_ids);
+    rx_ring->free_ids = NULL;
 }

 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
@@ -531,7 +531,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
     for (i = 0; i < num; i++) {
         struct ena_rx_buffer *rx_info;

-        req_id = rx_ring->free_rx_ids[next_to_use];
+        req_id = rx_ring->free_ids[next_to_use];
         rc = validate_rx_req_id(rx_ring, req_id);
         if (unlikely(rc < 0))
             break;
@@ -797,7 +797,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
         tx_pkts++;
         total_done += tx_info->tx_descs;

-        tx_ring->free_tx_ids[next_to_clean] = req_id;
+        tx_ring->free_ids[next_to_clean] = req_id;
         next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
                              tx_ring->ring_size);
     }
@@ -911,7 +911,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
         skb_put(skb, len);
         skb->protocol = eth_type_trans(skb, rx_ring->netdev);

-        rx_ring->free_rx_ids[*next_to_clean] = req_id;
+        rx_ring->free_ids[*next_to_clean] = req_id;
         *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
                              rx_ring->ring_size);
         return skb;
@@ -935,7 +935,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
         rx_info->page = NULL;

-        rx_ring->free_rx_ids[*next_to_clean] = req_id;
+        rx_ring->free_ids[*next_to_clean] = req_id;
         *next_to_clean =
             ENA_RX_RING_IDX_NEXT(*next_to_clean,
                          rx_ring->ring_size);
@@ -1001,6 +1001,9 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
         if (likely(ena_rx_ctx->l4_csum_checked)) {
             skb->ip_summed = CHECKSUM_UNNECESSARY;
+            u64_stats_update_begin(&rx_ring->syncp);
+            rx_ring->rx_stats.csum_good++;
+            u64_stats_update_end(&rx_ring->syncp);
         } else {
             u64_stats_update_begin(&rx_ring->syncp);
             rx_ring->rx_stats.csum_unchecked++;
@@ -1088,7 +1091,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
         /* exit if we failed to retrieve a buffer */
         if (unlikely(!skb)) {
             for (i = 0; i < ena_rx_ctx.descs; i++) {
-                rx_ring->free_tx_ids[next_to_clean] =
+                rx_ring->free_ids[next_to_clean] =
                     rx_ring->ena_bufs[i].req_id;
                 next_to_clean =
                     ENA_RX_RING_IDX_NEXT(next_to_clean,
@@ -2152,7 +2155,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     skb_tx_timestamp(skb);

     next_to_use = tx_ring->next_to_use;
-    req_id = tx_ring->free_tx_ids[next_to_use];
+    req_id = tx_ring->free_ids[next_to_use];
     tx_info = &tx_ring->tx_buffer_info[req_id];
     tx_info->num_of_bufs = 0;
@@ -2172,6 +2175,13 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     /* set flags and meta data */
     ena_tx_csum(&ena_tx_ctx, skb);

+    if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
+        netif_dbg(adapter, tx_queued, dev,
+              "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+              qid);
+        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+    }
+
     /* prepare the packet's descriptors to dma engine */
     rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
                 &nb_hw_desc);
@@ -2362,6 +2372,14 @@ err:
     ena_com_delete_debug_area(adapter->ena_dev);
 }

+static void ena_extra_properties_strings_destroy(struct net_device *netdev)
+{
+    struct ena_adapter *adapter = netdev_priv(netdev);
+
+    ena_com_delete_extra_properties_strings(adapter->ena_dev);
+    adapter->ena_extra_properties_count = 0;
+}
+
 static void ena_get_stats64(struct net_device *netdev,
                 struct rtnl_link_stats64 *stats)
 {
@@ -3266,23 +3284,21 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
     struct ena_com_dev_get_features_ctx get_feat_ctx;
-    static int version_printed;
-    struct net_device *netdev;
-    struct ena_adapter *adapter;
     struct ena_llq_configurations llq_config;
     struct ena_com_dev *ena_dev = NULL;
-    char *queue_type_str;
+    struct ena_adapter *adapter;
+    static int adapters_found;
     int io_queue_num, bars, rc;
-    int queue_size;
-    static int adapters_found;
+    struct net_device *netdev;
+    char *queue_type_str;
     u16 tx_sgl_size = 0;
     u16 rx_sgl_size = 0;
+    int queue_size;
     bool wd_state;

     dev_dbg(&pdev->dev, "%s\n", __func__);

-    if (version_printed++ == 0)
-        dev_info(&pdev->dev, "%s", version);
+    dev_info_once(&pdev->dev, "%s", version);

     rc = pci_enable_device_mem(pdev);
     if (rc) {
@@ -3417,6 +3433,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     ena_config_debug_area(adapter);

+    adapter->ena_extra_properties_count =
+        ena_com_extra_properties_strings_init(ena_dev);
+
     memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

     netif_carrier_off(netdev);
@@ -3456,6 +3475,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     return 0;

 err_rss:
+    ena_extra_properties_strings_destroy(netdev);
     ena_com_delete_debug_area(ena_dev);
     ena_com_rss_destroy(ena_dev);
 err_free_msix:
@@ -3522,6 +3542,8 @@ static void ena_remove(struct pci_dev *pdev)
     ena_com_delete_host_info(ena_dev);

+    ena_extra_properties_strings_destroy(netdev);
+
     ena_release_bars(ena_dev, pdev);

     pci_disable_device(pdev);

View File

@@ -208,26 +208,24 @@ struct ena_stats_tx {
 struct ena_stats_rx {
     u64 cnt;
     u64 bytes;
+    u64 rx_copybreak_pkt;
+    u64 csum_good;
     u64 refil_partial;
     u64 bad_csum;
     u64 page_alloc_fail;
     u64 skb_alloc_fail;
     u64 dma_mapping_err;
     u64 bad_desc_num;
-    u64 rx_copybreak_pkt;
     u64 bad_req_id;
     u64 empty_rx_ring;
     u64 csum_unchecked;
 };

 struct ena_ring {
-    union {
-        /* Holds the empty requests for TX/RX
-         * out of order completions
-         */
-        u16 *free_tx_ids;
-        u16 *free_rx_ids;
-    };
+    /* Holds the empty requests for TX/RX
+     * out of order completions
+     */
+    u16 *free_ids;

     union {
         struct ena_tx_buffer *tx_buffer_info;
@@ -364,6 +362,8 @@ struct ena_adapter {
     u32 last_monitored_tx_qid;

     enum ena_regs_reset_reason_types reset_reason;
+
+    u8 ena_extra_properties_count;
 };

 void ena_set_ethtool_ops(struct net_device *netdev);