Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says: ==================== Intel Wired LAN Driver Updates 2023-08-17 (ice) This series contains updates to ice driver only. Jan removes unused functions and refactors code to make, possible, functions static. Jake rearranges some functions to be logically grouped. Marcin removes an unnecessary call to disable VLAN stripping. Yang Yingliang utilizes list_for_each_entry() helper for a couple list traversals. Przemek removes some parameters from ice_aq_alloc_free_res() which were always the same and reworks ice_aq_wait_for_event() to reduce chance of race. * '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue: ice: split ice_aq_wait_for_event() func into two ice: embed &ice_rq_event_info event into struct ice_aq_task ice: ice_aq_check_events: fix off-by-one check when filling buffer ice: drop two params from ice_aq_alloc_free_res() ice: use list_for_each_entry() helper ice: Remove redundant VSI configuration in eswitch setup ice: move E810T functions to before device agnostic ones ice: refactor ice_vsi_is_vlan_pruning_ena ice: refactor ice_ptp_hw to make functions static ice: refactor ice_sched to make functions static ice: Utilize assign_bit() helper ice: refactor ice_vf_lib to make functions static ice: refactor ice_lib to make functions static ice: refactor ice_ddp to make functions static ice: remove unused methods ==================== Link: https://lore.kernel.org/r/20230817212239.2601543-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
c6cfc6cd76
|
@ -917,8 +917,25 @@ void ice_fdir_release_flows(struct ice_hw *hw);
|
|||
void ice_fdir_replay_flows(struct ice_hw *hw);
|
||||
void ice_fdir_replay_fltrs(struct ice_pf *pf);
|
||||
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
|
||||
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
|
||||
struct ice_rq_event_info *event);
|
||||
|
||||
enum ice_aq_task_state {
|
||||
ICE_AQ_TASK_NOT_PREPARED,
|
||||
ICE_AQ_TASK_WAITING,
|
||||
ICE_AQ_TASK_COMPLETE,
|
||||
ICE_AQ_TASK_CANCELED,
|
||||
};
|
||||
|
||||
struct ice_aq_task {
|
||||
struct hlist_node entry;
|
||||
struct ice_rq_event_info event;
|
||||
enum ice_aq_task_state state;
|
||||
u16 opcode;
|
||||
};
|
||||
|
||||
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
|
||||
u16 opcode);
|
||||
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
|
||||
unsigned long timeout);
|
||||
int ice_open(struct net_device *netdev);
|
||||
int ice_open_internal(struct net_device *netdev);
|
||||
int ice_stop(struct net_device *netdev);
|
||||
|
|
|
@ -2000,37 +2000,31 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
|
|||
/**
|
||||
* ice_aq_alloc_free_res - command to allocate/free resources
|
||||
* @hw: pointer to the HW struct
|
||||
* @num_entries: number of resource entries in buffer
|
||||
* @buf: Indirect buffer to hold data parameters and response
|
||||
* @buf_size: size of buffer for indirect commands
|
||||
* @opc: pass in the command opcode
|
||||
* @cd: pointer to command details structure or NULL
|
||||
*
|
||||
* Helper function to allocate/free resources using the admin queue commands
|
||||
*/
|
||||
int
|
||||
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
|
||||
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
|
||||
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
|
||||
int ice_aq_alloc_free_res(struct ice_hw *hw,
|
||||
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
|
||||
enum ice_adminq_opc opc)
|
||||
{
|
||||
struct ice_aqc_alloc_free_res_cmd *cmd;
|
||||
struct ice_aq_desc desc;
|
||||
|
||||
cmd = &desc.params.sw_res_ctrl;
|
||||
|
||||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
if (buf_size < flex_array_size(buf, elem, num_entries))
|
||||
if (!buf || buf_size < flex_array_size(buf, elem, 1))
|
||||
return -EINVAL;
|
||||
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, opc);
|
||||
|
||||
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
|
||||
|
||||
cmd->num_entries = cpu_to_le16(num_entries);
|
||||
cmd->num_entries = cpu_to_le16(1);
|
||||
|
||||
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
|
||||
return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2060,8 +2054,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
|
|||
if (btm)
|
||||
buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
|
||||
|
||||
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
|
||||
ice_aqc_opc_alloc_res, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
|
||||
if (status)
|
||||
goto ice_alloc_res_exit;
|
||||
|
||||
|
@ -2095,8 +2088,7 @@ int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
|
|||
buf->res_type = cpu_to_le16(type);
|
||||
memcpy(buf->elem, res, sizeof(*buf->elem) * num);
|
||||
|
||||
status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
|
||||
ice_aqc_opc_free_res, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
|
||||
if (status)
|
||||
ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
|
||||
|
||||
|
|
|
@ -38,10 +38,9 @@ int
|
|||
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
|
||||
int
|
||||
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
|
||||
int
|
||||
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
|
||||
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
|
||||
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
|
||||
int ice_aq_alloc_free_res(struct ice_hw *hw,
|
||||
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
|
||||
enum ice_adminq_opc opc);
|
||||
bool ice_is_sbq_supported(struct ice_hw *hw);
|
||||
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
|
||||
int
|
||||
|
|
|
@ -30,7 +30,7 @@ static const struct ice_tunnel_type_scan tnls[] = {
|
|||
* Verifies various attributes of the package file, including length, format
|
||||
* version, and the requirement of at least one segment.
|
||||
*/
|
||||
enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
|
||||
static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
|
||||
{
|
||||
u32 seg_count;
|
||||
u32 i;
|
||||
|
@ -118,7 +118,7 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
|
|||
*
|
||||
* This helper function validates a buffer's header.
|
||||
*/
|
||||
struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
|
||||
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
|
||||
{
|
||||
struct ice_buf_hdr *hdr;
|
||||
u16 section_count;
|
||||
|
@ -1152,6 +1152,54 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
|
|||
ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_aq_download_pkg
|
||||
* @hw: pointer to the hardware structure
|
||||
* @pkg_buf: the package buffer to transfer
|
||||
* @buf_size: the size of the package buffer
|
||||
* @last_buf: last buffer indicator
|
||||
* @error_offset: returns error offset
|
||||
* @error_info: returns error information
|
||||
* @cd: pointer to command details structure or NULL
|
||||
*
|
||||
* Download Package (0x0C40)
|
||||
*/
|
||||
static int
|
||||
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
||||
u16 buf_size, bool last_buf, u32 *error_offset,
|
||||
u32 *error_info, struct ice_sq_cd *cd)
|
||||
{
|
||||
struct ice_aqc_download_pkg *cmd;
|
||||
struct ice_aq_desc desc;
|
||||
int status;
|
||||
|
||||
if (error_offset)
|
||||
*error_offset = 0;
|
||||
if (error_info)
|
||||
*error_info = 0;
|
||||
|
||||
cmd = &desc.params.download_pkg;
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
|
||||
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
|
||||
|
||||
if (last_buf)
|
||||
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
|
||||
|
||||
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
|
||||
if (status == -EIO) {
|
||||
/* Read error from buffer only when the FW returned an error */
|
||||
struct ice_aqc_download_pkg_resp *resp;
|
||||
|
||||
resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
|
||||
if (error_offset)
|
||||
*error_offset = le32_to_cpu(resp->error_offset);
|
||||
if (error_info)
|
||||
*error_info = le32_to_cpu(resp->error_info);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_dwnld_cfg_bufs
|
||||
* @hw: pointer to the hardware structure
|
||||
|
@ -1293,73 +1341,6 @@ static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
|
|||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_aq_download_pkg
|
||||
* @hw: pointer to the hardware structure
|
||||
* @pkg_buf: the package buffer to transfer
|
||||
* @buf_size: the size of the package buffer
|
||||
* @last_buf: last buffer indicator
|
||||
* @error_offset: returns error offset
|
||||
* @error_info: returns error information
|
||||
* @cd: pointer to command details structure or NULL
|
||||
*
|
||||
* Download Package (0x0C40)
|
||||
*/
|
||||
int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
||||
u16 buf_size, bool last_buf, u32 *error_offset,
|
||||
u32 *error_info, struct ice_sq_cd *cd)
|
||||
{
|
||||
struct ice_aqc_download_pkg *cmd;
|
||||
struct ice_aq_desc desc;
|
||||
int status;
|
||||
|
||||
if (error_offset)
|
||||
*error_offset = 0;
|
||||
if (error_info)
|
||||
*error_info = 0;
|
||||
|
||||
cmd = &desc.params.download_pkg;
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
|
||||
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
|
||||
|
||||
if (last_buf)
|
||||
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
|
||||
|
||||
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
|
||||
if (status == -EIO) {
|
||||
/* Read error from buffer only when the FW returned an error */
|
||||
struct ice_aqc_download_pkg_resp *resp;
|
||||
|
||||
resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
|
||||
if (error_offset)
|
||||
*error_offset = le32_to_cpu(resp->error_offset);
|
||||
if (error_info)
|
||||
*error_info = le32_to_cpu(resp->error_info);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_aq_upload_section
|
||||
* @hw: pointer to the hardware structure
|
||||
* @pkg_buf: the package buffer which will receive the section
|
||||
* @buf_size: the size of the package buffer
|
||||
* @cd: pointer to command details structure or NULL
|
||||
*
|
||||
* Upload Section (0x0C41)
|
||||
*/
|
||||
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
||||
u16 buf_size, struct ice_sq_cd *cd)
|
||||
{
|
||||
struct ice_aq_desc desc;
|
||||
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
|
||||
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
|
||||
|
||||
return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_aq_update_pkg
|
||||
* @hw: pointer to the hardware structure
|
||||
|
@ -1407,6 +1388,26 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
|||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_aq_upload_section
|
||||
* @hw: pointer to the hardware structure
|
||||
* @pkg_buf: the package buffer which will receive the section
|
||||
* @buf_size: the size of the package buffer
|
||||
* @cd: pointer to command details structure or NULL
|
||||
*
|
||||
* Upload Section (0x0C41)
|
||||
*/
|
||||
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
||||
u16 buf_size, struct ice_sq_cd *cd)
|
||||
{
|
||||
struct ice_aq_desc desc;
|
||||
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
|
||||
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
|
||||
|
||||
return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_update_pkg_no_lock
|
||||
* @hw: pointer to the hardware structure
|
||||
|
@ -1470,8 +1471,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
|
|||
* success it returns a pointer to the segment header, otherwise it will
|
||||
* return NULL.
|
||||
*/
|
||||
struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
|
||||
struct ice_pkg_hdr *pkg_hdr)
|
||||
static struct ice_generic_seg_hdr *
|
||||
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
|
||||
struct ice_pkg_hdr *pkg_hdr)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
|
|
|
@ -416,21 +416,13 @@ struct ice_pkg_enum {
|
|||
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
|
||||
};
|
||||
|
||||
int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
||||
u16 buf_size, bool last_buf, u32 *error_offset,
|
||||
u32 *error_info, struct ice_sq_cd *cd);
|
||||
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
|
||||
u16 buf_size, struct ice_sq_cd *cd);
|
||||
|
||||
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
|
||||
|
||||
enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
|
||||
|
||||
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
|
||||
|
||||
struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
|
||||
struct ice_pkg_hdr *pkg_hdr);
|
||||
|
||||
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
|
||||
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
|
||||
|
||||
|
@ -439,6 +431,4 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
|
|||
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
|
||||
u32 sect_type);
|
||||
|
||||
struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -84,10 +84,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
|
|||
struct ice_vsi_vlan_ops *vlan_ops;
|
||||
bool rule_added = false;
|
||||
|
||||
vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
|
||||
if (vlan_ops->dis_stripping(ctrl_vsi))
|
||||
return -ENODEV;
|
||||
|
||||
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
|
||||
|
||||
netif_addr_lock_bh(uplink_netdev);
|
||||
|
|
|
@ -293,16 +293,17 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
|
|||
{
|
||||
u16 completion_module, completion_retval;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_rq_event_info event;
|
||||
struct ice_aq_task task = {};
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
struct ice_aq_desc *desc;
|
||||
u32 completion_offset;
|
||||
int err;
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
|
||||
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
|
||||
block_size, module, offset);
|
||||
|
||||
ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);
|
||||
|
||||
err = ice_aq_update_nvm(hw, module, offset, block_size, block,
|
||||
last_cmd, 0, NULL);
|
||||
if (err) {
|
||||
|
@ -319,7 +320,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
|
|||
* is conservative and is intended to prevent failure to update when
|
||||
* firmware is slow to respond.
|
||||
*/
|
||||
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
|
||||
err = ice_aq_wait_for_event(pf, &task, 15 * HZ);
|
||||
if (err) {
|
||||
dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
|
||||
module, block_size, offset, err);
|
||||
|
@ -327,11 +328,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
|
||||
completion_retval = le16_to_cpu(event.desc.retval);
|
||||
desc = &task.event.desc;
|
||||
completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
|
||||
completion_retval = le16_to_cpu(desc->retval);
|
||||
|
||||
completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low);
|
||||
completion_offset |= event.desc.params.nvm.offset_high << 16;
|
||||
completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
|
||||
completion_offset |= desc->params.nvm.offset_high << 16;
|
||||
|
||||
if (completion_module != module) {
|
||||
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
|
||||
|
@ -363,8 +365,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
|
|||
*/
|
||||
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
|
||||
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
|
||||
*reset_level = (event.desc.params.nvm.cmd_flags &
|
||||
ICE_AQC_NVM_RESET_LVL_M);
|
||||
*reset_level = desc->params.nvm.cmd_flags &
|
||||
ICE_AQC_NVM_RESET_LVL_M;
|
||||
dev_dbg(dev, "Firmware reported required reset level as %u\n",
|
||||
*reset_level);
|
||||
} else {
|
||||
|
@ -479,19 +481,20 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
|
|||
{
|
||||
u16 completion_module, completion_retval;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_rq_event_info event;
|
||||
struct ice_aq_task task = {};
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
struct ice_aq_desc *desc;
|
||||
struct devlink *devlink;
|
||||
int err;
|
||||
|
||||
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
|
||||
devlink = priv_to_devlink(pf);
|
||||
|
||||
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
|
||||
|
||||
ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase);
|
||||
|
||||
err = ice_aq_erase_nvm(hw, module, NULL);
|
||||
if (err) {
|
||||
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
|
||||
|
@ -502,7 +505,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
|
|||
goto out_notify_devlink;
|
||||
}
|
||||
|
||||
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event);
|
||||
err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ);
|
||||
if (err) {
|
||||
dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
|
||||
component, module, err);
|
||||
|
@ -510,8 +513,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
|
|||
goto out_notify_devlink;
|
||||
}
|
||||
|
||||
completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
|
||||
completion_retval = le16_to_cpu(event.desc.retval);
|
||||
desc = &task.event.desc;
|
||||
completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
|
||||
completion_retval = le16_to_cpu(desc->retval);
|
||||
|
||||
if (completion_module != module) {
|
||||
dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
|
||||
|
@ -560,13 +564,13 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
|
|||
u8 *emp_reset_available, struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_rq_event_info event;
|
||||
struct ice_aq_task task = {};
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
u16 completion_retval;
|
||||
u8 response_flags;
|
||||
int err;
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
|
||||
|
||||
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
|
||||
if (err) {
|
||||
|
@ -592,8 +596,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
|
|||
}
|
||||
}
|
||||
|
||||
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
|
||||
&event);
|
||||
err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
|
||||
if (err) {
|
||||
dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
|
||||
err);
|
||||
|
@ -601,7 +604,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
|
|||
return err;
|
||||
}
|
||||
|
||||
completion_retval = le16_to_cpu(event.desc.retval);
|
||||
completion_retval = le16_to_cpu(task.event.desc.retval);
|
||||
if (completion_retval) {
|
||||
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
|
||||
ice_aq_str((enum ice_aq_err)completion_retval));
|
||||
|
|
|
@ -129,11 +129,9 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
|
|||
struct ice_lag_netdev_list *entry;
|
||||
struct net_device *tmp_netdev;
|
||||
struct ice_netdev_priv *np;
|
||||
struct list_head *tmp;
|
||||
struct ice_hw *hw;
|
||||
|
||||
list_for_each(tmp, lag->netdev_head) {
|
||||
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
|
||||
list_for_each_entry(entry, lag->netdev_head, node) {
|
||||
tmp_netdev = entry->netdev;
|
||||
if (!tmp_netdev || !netif_is_ice(tmp_netdev))
|
||||
continue;
|
||||
|
@ -985,9 +983,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
|
|||
/* if unlinnking need to free the shared resource */
|
||||
if (!link && local_lag->bond_swid) {
|
||||
buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
|
||||
status = ice_aq_alloc_free_res(&local_lag->pf->hw, 1, buf,
|
||||
buf_len, ice_aqc_opc_free_res,
|
||||
NULL);
|
||||
status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf,
|
||||
buf_len, ice_aqc_opc_free_res);
|
||||
if (status)
|
||||
dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n");
|
||||
local_lag->bond_swid = 0;
|
||||
|
@ -1004,8 +1001,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
|
|||
cpu_to_le16(local_lag->pf->hw.port_info->sw_id);
|
||||
}
|
||||
|
||||
status = ice_aq_alloc_free_res(&local_lag->pf->hw, 1, buf, buf_len,
|
||||
ice_aqc_opc_alloc_res, NULL);
|
||||
status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len,
|
||||
ice_aqc_opc_alloc_res);
|
||||
if (status)
|
||||
dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n",
|
||||
local_lag->bond_swid);
|
||||
|
@ -1535,11 +1532,9 @@ static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
|
|||
struct ice_lag_netdev_list *entry;
|
||||
struct ice_netdev_priv *np;
|
||||
struct net_device *netdev;
|
||||
struct list_head *tmp;
|
||||
struct ice_pf *pf;
|
||||
|
||||
list_for_each(tmp, lag->netdev_head) {
|
||||
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
|
||||
list_for_each_entry(entry, lag->netdev_head, node) {
|
||||
netdev = entry->netdev;
|
||||
np = netdev_priv(netdev);
|
||||
pf = np->vsi->back;
|
||||
|
|
|
@ -1227,6 +1227,17 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
|
|||
ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
|
||||
* @vsi: VSI to check whether or not VLAN pruning is enabled.
|
||||
*
|
||||
* returns true if Rx VLAN pruning is enabled and false otherwise.
|
||||
*/
|
||||
static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
|
||||
{
|
||||
return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_init - Create and initialize a VSI
|
||||
* @vsi: the VSI being configured
|
||||
|
@ -1684,6 +1695,27 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
|
|||
vsi_num, status);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
|
||||
* @vsi: VSI
|
||||
*/
|
||||
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
|
||||
{
|
||||
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
|
||||
vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
|
||||
vsi->rx_buf_len = ICE_RXBUF_1664;
|
||||
#if (PAGE_SIZE < 8192)
|
||||
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
|
||||
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
|
||||
vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
|
||||
vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
|
||||
#endif
|
||||
} else {
|
||||
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
|
||||
vsi->rx_buf_len = ICE_RXBUF_3072;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_pf_state_is_nominal - checks the PF for nominal state
|
||||
* @pf: pointer to PF to check
|
||||
|
@ -1758,27 +1790,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
|
|||
vsi->stat_offsets_loaded = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
|
||||
* @vsi: VSI
|
||||
*/
|
||||
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
|
||||
{
|
||||
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
|
||||
vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
|
||||
vsi->rx_buf_len = ICE_RXBUF_1664;
|
||||
#if (PAGE_SIZE < 8192)
|
||||
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
|
||||
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
|
||||
vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
|
||||
vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
|
||||
#endif
|
||||
} else {
|
||||
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
|
||||
vsi->rx_buf_len = ICE_RXBUF_3072;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
|
||||
* @hw: HW pointer
|
||||
|
@ -2185,20 +2196,6 @@ bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
|
||||
* @vsi: VSI to check whether or not VLAN pruning is enabled.
|
||||
*
|
||||
* returns true if Rx VLAN pruning is enabled and false otherwise.
|
||||
*/
|
||||
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
|
||||
{
|
||||
if (!vsi)
|
||||
return false;
|
||||
|
||||
return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
|
||||
}
|
||||
|
||||
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
|
||||
{
|
||||
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
|
||||
|
@ -2943,21 +2940,6 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
|
|||
synchronize_irq(vsi->q_vectors[i]->irq.virq);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_napi_del - Remove NAPI handler for the VSI
|
||||
* @vsi: VSI for which NAPI handler is to be removed
|
||||
*/
|
||||
void ice_napi_del(struct ice_vsi *vsi)
|
||||
{
|
||||
int v_idx;
|
||||
|
||||
if (!vsi->netdev)
|
||||
return;
|
||||
|
||||
ice_for_each_q_vector(vsi, v_idx)
|
||||
netif_napi_del(&vsi->q_vectors[v_idx]->napi);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vsi_release - Delete a VSI and free its resources
|
||||
* @vsi: the VSI being removed
|
||||
|
|
|
@ -76,8 +76,6 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
|
|||
|
||||
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
|
||||
|
||||
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
|
||||
|
||||
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
|
||||
|
||||
int ice_set_link(struct ice_vsi *vsi, bool ena);
|
||||
|
@ -93,8 +91,6 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
|
|||
struct ice_vsi *
|
||||
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
|
||||
|
||||
void ice_napi_del(struct ice_vsi *vsi);
|
||||
|
||||
int ice_vsi_release(struct ice_vsi *vsi);
|
||||
|
||||
void ice_vsi_close(struct ice_vsi *vsi);
|
||||
|
@ -130,7 +126,6 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
|
|||
|
||||
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
|
||||
|
||||
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
|
||||
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
|
||||
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
|
||||
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
|
||||
|
|
|
@ -1250,64 +1250,63 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
|
|||
return status;
|
||||
}
|
||||
|
||||
enum ice_aq_task_state {
|
||||
ICE_AQ_TASK_WAITING = 0,
|
||||
ICE_AQ_TASK_COMPLETE,
|
||||
ICE_AQ_TASK_CANCELED,
|
||||
};
|
||||
|
||||
struct ice_aq_task {
|
||||
struct hlist_node entry;
|
||||
|
||||
u16 opcode;
|
||||
struct ice_rq_event_info *event;
|
||||
enum ice_aq_task_state state;
|
||||
};
|
||||
|
||||
/**
|
||||
* ice_aq_wait_for_event - Wait for an AdminQ event from firmware
|
||||
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
|
||||
* @pf: pointer to the PF private structure
|
||||
* @task: intermediate helper storage and identifier for waiting
|
||||
* @opcode: the opcode to wait for
|
||||
* @timeout: how long to wait, in jiffies
|
||||
* @event: storage for the event info
|
||||
*
|
||||
* Waits for a specific AdminQ completion event on the ARQ for a given PF. The
|
||||
* current thread will be put to sleep until the specified event occurs or
|
||||
* until the given timeout is reached.
|
||||
* Prepares to wait for a specific AdminQ completion event on the ARQ for
|
||||
* a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
|
||||
*
|
||||
* To obtain only the descriptor contents, pass an event without an allocated
|
||||
* Calls are separated to allow caller registering for event before sending
|
||||
* the command, which mitigates a race between registering and FW responding.
|
||||
*
|
||||
* To obtain only the descriptor contents, pass an task->event with null
|
||||
* msg_buf. If the complete data buffer is desired, allocate the
|
||||
* event->msg_buf with enough space ahead of time.
|
||||
*
|
||||
* Returns: zero on success, or a negative error code on failure.
|
||||
* task->event.msg_buf with enough space ahead of time.
|
||||
*/
|
||||
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
|
||||
struct ice_rq_event_info *event)
|
||||
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
|
||||
u16 opcode)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
struct ice_aq_task *task;
|
||||
unsigned long start;
|
||||
long ret;
|
||||
int err;
|
||||
|
||||
task = kzalloc(sizeof(*task), GFP_KERNEL);
|
||||
if (!task)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_HLIST_NODE(&task->entry);
|
||||
task->opcode = opcode;
|
||||
task->event = event;
|
||||
task->state = ICE_AQ_TASK_WAITING;
|
||||
|
||||
spin_lock_bh(&pf->aq_wait_lock);
|
||||
hlist_add_head(&task->entry, &pf->aq_wait_list);
|
||||
spin_unlock_bh(&pf->aq_wait_lock);
|
||||
}
|
||||
|
||||
start = jiffies;
|
||||
/**
|
||||
* ice_aq_wait_for_event - Wait for an AdminQ event from firmware
|
||||
* @pf: pointer to the PF private structure
|
||||
* @task: ptr prepared by ice_aq_prep_for_event()
|
||||
* @timeout: how long to wait, in jiffies
|
||||
*
|
||||
* Waits for a specific AdminQ completion event on the ARQ for a given PF. The
|
||||
* current thread will be put to sleep until the specified event occurs or
|
||||
* until the given timeout is reached.
|
||||
*
|
||||
* Returns: zero on success, or a negative error code on failure.
|
||||
*/
|
||||
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
|
||||
unsigned long timeout)
|
||||
{
|
||||
enum ice_aq_task_state *state = &task->state;
|
||||
struct device *dev = ice_pf_to_dev(pf);
|
||||
unsigned long start = jiffies;
|
||||
long ret;
|
||||
int err;
|
||||
|
||||
ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
|
||||
ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
|
||||
*state != ICE_AQ_TASK_WAITING,
|
||||
timeout);
|
||||
switch (task->state) {
|
||||
switch (*state) {
|
||||
case ICE_AQ_TASK_NOT_PREPARED:
|
||||
WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
|
||||
err = -EINVAL;
|
||||
break;
|
||||
case ICE_AQ_TASK_WAITING:
|
||||
err = ret < 0 ? ret : -ETIMEDOUT;
|
||||
break;
|
||||
|
@ -1318,7 +1317,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
|
|||
err = ret < 0 ? ret : 0;
|
||||
break;
|
||||
default:
|
||||
WARN(1, "Unexpected AdminQ wait task state %u", task->state);
|
||||
WARN(1, "Unexpected AdminQ wait task state %u", *state);
|
||||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
@ -1326,12 +1325,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
|
|||
dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
|
||||
jiffies_to_msecs(jiffies - start),
|
||||
jiffies_to_msecs(timeout),
|
||||
opcode);
|
||||
task->opcode);
|
||||
|
||||
spin_lock_bh(&pf->aq_wait_lock);
|
||||
hlist_del(&task->entry);
|
||||
spin_unlock_bh(&pf->aq_wait_lock);
|
||||
kfree(task);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1357,23 +1355,26 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
|
|||
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
|
||||
struct ice_rq_event_info *event)
|
||||
{
|
||||
struct ice_rq_event_info *task_ev;
|
||||
struct ice_aq_task *task;
|
||||
bool found = false;
|
||||
|
||||
spin_lock_bh(&pf->aq_wait_lock);
|
||||
hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
|
||||
if (task->state || task->opcode != opcode)
|
||||
if (task->state != ICE_AQ_TASK_WAITING)
|
||||
continue;
|
||||
if (task->opcode != opcode)
|
||||
continue;
|
||||
|
||||
memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
|
||||
task->event->msg_len = event->msg_len;
|
||||
task_ev = &task->event;
|
||||
memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
|
||||
task_ev->msg_len = event->msg_len;
|
||||
|
||||
/* Only copy the data buffer if a destination was set */
|
||||
if (task->event->msg_buf &&
|
||||
task->event->buf_len > event->buf_len) {
|
||||
memcpy(task->event->msg_buf, event->msg_buf,
|
||||
if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
|
||||
memcpy(task_ev->msg_buf, event->msg_buf,
|
||||
event->buf_len);
|
||||
task->event->buf_len = event->buf_len;
|
||||
task_ev->buf_len = event->buf_len;
|
||||
}
|
||||
|
||||
task->state = ICE_AQ_TASK_COMPLETE;
|
||||
|
|
|
@ -293,7 +293,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
|
|||
*
|
||||
* Read a PHY register for the given port over the device sideband queue.
|
||||
*/
|
||||
int
|
||||
static int
|
||||
ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
|
||||
{
|
||||
struct ice_sbq_msg_input msg = {0};
|
||||
|
@ -370,7 +370,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
|
|||
*
|
||||
* Write a PHY register for the given port over the device sideband queue.
|
||||
*/
|
||||
int
|
||||
static int
|
||||
ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
|
||||
{
|
||||
struct ice_sbq_msg_input msg = {0};
|
||||
|
@ -1079,7 +1079,7 @@ exit_err:
|
|||
*
|
||||
* Negative adjustments are supported using 2s complement arithmetic.
|
||||
*/
|
||||
int
|
||||
static int
|
||||
ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
|
||||
{
|
||||
u32 l_time, u_time;
|
||||
|
@ -2869,6 +2869,185 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
|
||||
* @hw: pointer to the HW struct
|
||||
* @port: the PHY port to read
|
||||
* @tstamp_ready: contents of the Tx memory status register
|
||||
*
|
||||
* E810 devices do not use a Tx memory status register. Instead simply
|
||||
* indicate that all timestamps are currently ready.
|
||||
*/
|
||||
static int
|
||||
ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
|
||||
{
|
||||
*tstamp_ready = 0xFFFFFFFFFFFFFFFF;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* E810T SMA functions
|
||||
*
|
||||
* The following functions operate specifically on E810T hardware and are used
|
||||
* to access the extended GPIOs available.
|
||||
*/
|
||||
|
||||
/**
|
||||
* ice_get_pca9575_handle
|
||||
* @hw: pointer to the hw struct
|
||||
* @pca9575_handle: GPIO controller's handle
|
||||
*
|
||||
* Find and return the GPIO controller's handle in the netlist.
|
||||
* When found - the value will be cached in the hw structure and following calls
|
||||
* will return cached value
|
||||
*/
|
||||
static int
|
||||
ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
|
||||
{
|
||||
struct ice_aqc_get_link_topo *cmd;
|
||||
struct ice_aq_desc desc;
|
||||
int status;
|
||||
u8 idx;
|
||||
|
||||
/* If handle was read previously return cached value */
|
||||
if (hw->io_expander_handle) {
|
||||
*pca9575_handle = hw->io_expander_handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* If handle was not detected read it from the netlist */
|
||||
cmd = &desc.params.get_link_topo;
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
|
||||
|
||||
/* Set node type to GPIO controller */
|
||||
cmd->addr.topo_params.node_type_ctx =
|
||||
(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
|
||||
ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
|
||||
|
||||
#define SW_PCA9575_SFP_TOPO_IDX 2
|
||||
#define SW_PCA9575_QSFP_TOPO_IDX 1
|
||||
|
||||
/* Check if the SW IO expander controlling SMA exists in the netlist. */
|
||||
if (hw->device_id == ICE_DEV_ID_E810C_SFP)
|
||||
idx = SW_PCA9575_SFP_TOPO_IDX;
|
||||
else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
|
||||
idx = SW_PCA9575_QSFP_TOPO_IDX;
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
cmd->addr.topo_params.index = idx;
|
||||
|
||||
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
|
||||
if (status)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Verify if we found the right IO expander type */
|
||||
if (desc.params.get_link_topo.node_part_num !=
|
||||
ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* If present save the handle and return it */
|
||||
hw->io_expander_handle =
|
||||
le16_to_cpu(desc.params.get_link_topo.addr.handle);
|
||||
*pca9575_handle = hw->io_expander_handle;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_read_sma_ctrl_e810t
|
||||
* @hw: pointer to the hw struct
|
||||
* @data: pointer to data to be read from the GPIO controller
|
||||
*
|
||||
* Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
|
||||
* PCA9575 expander, so only bits 3-7 in data are valid.
|
||||
*/
|
||||
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
|
||||
{
|
||||
int status;
|
||||
u16 handle;
|
||||
u8 i;
|
||||
|
||||
status = ice_get_pca9575_handle(hw, &handle);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
*data = 0;
|
||||
|
||||
for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
|
||||
bool pin;
|
||||
|
||||
status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
|
||||
&pin, NULL);
|
||||
if (status)
|
||||
break;
|
||||
*data |= (u8)(!pin) << i;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_write_sma_ctrl_e810t
|
||||
* @hw: pointer to the hw struct
|
||||
* @data: data to be written to the GPIO controller
|
||||
*
|
||||
* Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
|
||||
* of the PCA9575 expander, so only bits 3-7 in data are valid.
|
||||
*/
|
||||
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
|
||||
{
|
||||
int status;
|
||||
u16 handle;
|
||||
u8 i;
|
||||
|
||||
status = ice_get_pca9575_handle(hw, &handle);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
|
||||
bool pin;
|
||||
|
||||
pin = !(data & (1 << i));
|
||||
status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
|
||||
pin, NULL);
|
||||
if (status)
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_read_pca9575_reg_e810t
|
||||
* @hw: pointer to the hw struct
|
||||
* @offset: GPIO controller register offset
|
||||
* @data: pointer to data to be read from the GPIO controller
|
||||
*
|
||||
* Read the register from the GPIO controller
|
||||
*/
|
||||
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
|
||||
{
|
||||
struct ice_aqc_link_topo_addr link_topo;
|
||||
__le16 addr;
|
||||
u16 handle;
|
||||
int err;
|
||||
|
||||
memset(&link_topo, 0, sizeof(link_topo));
|
||||
|
||||
err = ice_get_pca9575_handle(hw, &handle);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
link_topo.handle = cpu_to_le16(handle);
|
||||
link_topo.topo_params.node_type_ctx =
|
||||
FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
|
||||
ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
|
||||
|
||||
addr = cpu_to_le16((u16)offset);
|
||||
|
||||
return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
|
||||
}
|
||||
|
||||
/* Device agnostic functions
|
||||
*
|
||||
* The following functions implement shared behavior common to both E822 and
|
||||
|
@ -3129,204 +3308,6 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
|
|||
return ice_clear_phy_tstamp_e822(hw, block, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
|
||||
* @hw: pointer to the HW struct
|
||||
* @port: the PHY port to read
|
||||
* @tstamp_ready: contents of the Tx memory status register
|
||||
*
|
||||
* E810 devices do not use a Tx memory status register. Instead simply
|
||||
* indicate that all timestamps are currently ready.
|
||||
*/
|
||||
static int
|
||||
ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
|
||||
{
|
||||
*tstamp_ready = 0xFFFFFFFFFFFFFFFF;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* E810T SMA functions
|
||||
*
|
||||
* The following functions operate specifically on E810T hardware and are used
|
||||
* to access the extended GPIOs available.
|
||||
*/
|
||||
|
||||
/**
|
||||
* ice_get_pca9575_handle
|
||||
* @hw: pointer to the hw struct
|
||||
* @pca9575_handle: GPIO controller's handle
|
||||
*
|
||||
* Find and return the GPIO controller's handle in the netlist.
|
||||
* When found - the value will be cached in the hw structure and following calls
|
||||
* will return cached value
|
||||
*/
|
||||
static int
|
||||
ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
|
||||
{
|
||||
struct ice_aqc_get_link_topo *cmd;
|
||||
struct ice_aq_desc desc;
|
||||
int status;
|
||||
u8 idx;
|
||||
|
||||
/* If handle was read previously return cached value */
|
||||
if (hw->io_expander_handle) {
|
||||
*pca9575_handle = hw->io_expander_handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* If handle was not detected read it from the netlist */
|
||||
cmd = &desc.params.get_link_topo;
|
||||
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
|
||||
|
||||
/* Set node type to GPIO controller */
|
||||
cmd->addr.topo_params.node_type_ctx =
|
||||
(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
|
||||
ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
|
||||
|
||||
#define SW_PCA9575_SFP_TOPO_IDX 2
|
||||
#define SW_PCA9575_QSFP_TOPO_IDX 1
|
||||
|
||||
/* Check if the SW IO expander controlling SMA exists in the netlist. */
|
||||
if (hw->device_id == ICE_DEV_ID_E810C_SFP)
|
||||
idx = SW_PCA9575_SFP_TOPO_IDX;
|
||||
else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
|
||||
idx = SW_PCA9575_QSFP_TOPO_IDX;
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
cmd->addr.topo_params.index = idx;
|
||||
|
||||
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
|
||||
if (status)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Verify if we found the right IO expander type */
|
||||
if (desc.params.get_link_topo.node_part_num !=
|
||||
ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* If present save the handle and return it */
|
||||
hw->io_expander_handle =
|
||||
le16_to_cpu(desc.params.get_link_topo.addr.handle);
|
||||
*pca9575_handle = hw->io_expander_handle;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_read_sma_ctrl_e810t
|
||||
* @hw: pointer to the hw struct
|
||||
* @data: pointer to data to be read from the GPIO controller
|
||||
*
|
||||
* Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
|
||||
* PCA9575 expander, so only bits 3-7 in data are valid.
|
||||
*/
|
||||
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
|
||||
{
|
||||
int status;
|
||||
u16 handle;
|
||||
u8 i;
|
||||
|
||||
status = ice_get_pca9575_handle(hw, &handle);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
*data = 0;
|
||||
|
||||
for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
|
||||
bool pin;
|
||||
|
||||
status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
|
||||
&pin, NULL);
|
||||
if (status)
|
||||
break;
|
||||
*data |= (u8)(!pin) << i;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_write_sma_ctrl_e810t
|
||||
* @hw: pointer to the hw struct
|
||||
* @data: data to be written to the GPIO controller
|
||||
*
|
||||
* Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
|
||||
* of the PCA9575 expander, so only bits 3-7 in data are valid.
|
||||
*/
|
||||
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
|
||||
{
|
||||
int status;
|
||||
u16 handle;
|
||||
u8 i;
|
||||
|
||||
status = ice_get_pca9575_handle(hw, &handle);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
|
||||
bool pin;
|
||||
|
||||
pin = !(data & (1 << i));
|
||||
status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
|
||||
pin, NULL);
|
||||
if (status)
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_read_pca9575_reg_e810t
|
||||
* @hw: pointer to the hw struct
|
||||
* @offset: GPIO controller register offset
|
||||
* @data: pointer to data to be read from the GPIO controller
|
||||
*
|
||||
* Read the register from the GPIO controller
|
||||
*/
|
||||
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
|
||||
{
|
||||
struct ice_aqc_link_topo_addr link_topo;
|
||||
__le16 addr;
|
||||
u16 handle;
|
||||
int err;
|
||||
|
||||
memset(&link_topo, 0, sizeof(link_topo));
|
||||
|
||||
err = ice_get_pca9575_handle(hw, &handle);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
link_topo.handle = cpu_to_le16(handle);
|
||||
link_topo.topo_params.node_type_ctx =
|
||||
FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
|
||||
ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
|
||||
|
||||
addr = cpu_to_le16((u16)offset);
|
||||
|
||||
return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_is_pca9575_present
|
||||
* @hw: pointer to the hw struct
|
||||
*
|
||||
* Check if the SW IO expander is present in the netlist
|
||||
*/
|
||||
bool ice_is_pca9575_present(struct ice_hw *hw)
|
||||
{
|
||||
u16 handle = 0;
|
||||
int status;
|
||||
|
||||
if (!ice_is_e810t(hw))
|
||||
return false;
|
||||
|
||||
status = ice_get_pca9575_handle(hw, &handle);
|
||||
|
||||
return !status && handle;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
|
||||
* @hw: pointer to the HW struct
|
||||
|
|
|
@ -141,11 +141,8 @@ int ice_ptp_init_phc(struct ice_hw *hw);
|
|||
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
|
||||
|
||||
/* E822 family functions */
|
||||
int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
|
||||
int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
|
||||
int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
|
||||
int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
|
||||
int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
|
||||
void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
|
||||
|
||||
/**
|
||||
|
@ -199,7 +196,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
|
|||
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
|
||||
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
|
||||
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
|
||||
bool ice_is_pca9575_present(struct ice_hw *hw);
|
||||
|
||||
#define PFTSYN_SEM_BYTES 4
|
||||
|
||||
|
|
|
@ -3971,7 +3971,7 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
|
|||
* This function sets BW limit of VSI or Aggregator scheduling node
|
||||
* based on TC information from passed in argument BW.
|
||||
*/
|
||||
int
|
||||
static int
|
||||
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
|
||||
enum ice_agg_type agg_type, u8 tc,
|
||||
enum ice_rl_type rl_type, u32 bw)
|
||||
|
|
|
@ -141,10 +141,6 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
|
|||
int
|
||||
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
|
||||
enum ice_rl_type rl_type);
|
||||
int
|
||||
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
|
||||
enum ice_agg_type agg_type, u8 tc,
|
||||
enum ice_rl_type rl_type, u32 bw);
|
||||
int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
|
||||
int
|
||||
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
|
||||
|
|
|
@ -1847,7 +1847,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
|
|||
if (opc == ice_aqc_opc_free_res)
|
||||
sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
|
||||
|
||||
status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
|
||||
if (status)
|
||||
goto ice_aq_alloc_free_vsi_list_exit;
|
||||
|
||||
|
@ -2101,8 +2101,8 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
|
|||
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
|
||||
ICE_AQC_RES_TYPE_S) |
|
||||
ICE_AQC_RES_TYPE_FLAG_SHARED);
|
||||
status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
|
||||
ice_aqc_opc_alloc_res, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
|
||||
ice_aqc_opc_alloc_res);
|
||||
if (!status)
|
||||
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
|
||||
kfree(sw_buf);
|
||||
|
@ -3408,54 +3408,6 @@ exit:
|
|||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_mac_fltr_exist - does this MAC filter exist for given VSI
|
||||
* @hw: pointer to the hardware structure
|
||||
* @mac: MAC address to be checked (for MAC filter)
|
||||
* @vsi_handle: check MAC filter for this VSI
|
||||
*/
|
||||
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
|
||||
{
|
||||
struct ice_fltr_mgmt_list_entry *entry;
|
||||
struct list_head *rule_head;
|
||||
struct ice_switch_info *sw;
|
||||
struct mutex *rule_lock; /* Lock to protect filter rule list */
|
||||
u16 hw_vsi_id;
|
||||
|
||||
if (!ice_is_vsi_valid(hw, vsi_handle))
|
||||
return false;
|
||||
|
||||
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
|
||||
sw = hw->switch_info;
|
||||
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
|
||||
if (!rule_head)
|
||||
return false;
|
||||
|
||||
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
|
||||
mutex_lock(rule_lock);
|
||||
list_for_each_entry(entry, rule_head, list_entry) {
|
||||
struct ice_fltr_info *f_info = &entry->fltr_info;
|
||||
u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
|
||||
|
||||
if (is_zero_ether_addr(mac_addr))
|
||||
continue;
|
||||
|
||||
if (f_info->flag != ICE_FLTR_TX ||
|
||||
f_info->src_id != ICE_SRC_ID_VSI ||
|
||||
f_info->lkup_type != ICE_SW_LKUP_MAC ||
|
||||
f_info->fltr_act != ICE_FWD_TO_VSI ||
|
||||
hw_vsi_id != f_info->fwd_id.hw_vsi_id)
|
||||
continue;
|
||||
|
||||
if (ether_addr_equal(mac, mac_addr)) {
|
||||
mutex_unlock(rule_lock);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
mutex_unlock(rule_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
|
||||
* @hw: pointer to the hardware structure
|
||||
|
@ -4496,8 +4448,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
|
|||
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
|
||||
ICE_AQC_RES_TYPE_M) | alloc_shared);
|
||||
|
||||
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
|
||||
ice_aqc_opc_alloc_res, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
|
||||
if (status)
|
||||
goto exit;
|
||||
|
||||
|
@ -4535,8 +4486,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
|
|||
ICE_AQC_RES_TYPE_M) | alloc_shared);
|
||||
buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
|
||||
|
||||
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
|
||||
ice_aqc_opc_free_res, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
|
||||
if (status)
|
||||
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
|
||||
|
||||
|
@ -4578,8 +4528,8 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
|
|||
~ICE_AQC_RES_TYPE_FLAG_SHARED);
|
||||
|
||||
buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
|
||||
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
|
||||
ice_aqc_opc_share_res, NULL);
|
||||
status = ice_aq_alloc_free_res(hw, buf, buf_len,
|
||||
ice_aqc_opc_share_res);
|
||||
if (status)
|
||||
ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
|
||||
type, res_id, shared ? "SHARED" : "DEDICATED");
|
||||
|
|
|
@ -371,7 +371,6 @@ int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
|
|||
int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
|
||||
int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
|
||||
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
|
||||
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
|
||||
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
|
||||
int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
|
||||
int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
|
||||
|
|
|
@ -322,6 +322,237 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
|
||||
* @vf: VF to add MAC filters for
|
||||
* @vsi: Pointer to VSI
|
||||
*
|
||||
* Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
|
||||
* always re-adds either a VLAN 0 or port VLAN based filter after reset.
|
||||
*/
|
||||
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
|
||||
struct device *dev = ice_pf_to_dev(vf->pf);
|
||||
int err;
|
||||
|
||||
if (ice_vf_is_port_vlan_ena(vf)) {
|
||||
err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
|
||||
vf->vf_id, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
|
||||
} else {
|
||||
err = ice_vsi_add_vlan_zero(vsi);
|
||||
}
|
||||
|
||||
if (err) {
|
||||
dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
|
||||
ice_vf_is_port_vlan_ena(vf) ?
|
||||
ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = vlan_ops->ena_rx_filtering(vsi);
|
||||
if (err)
|
||||
dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
|
||||
vf->vf_id, vsi->idx, err);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
|
||||
* @vf: VF to re-apply the configuration for
|
||||
*
|
||||
* Called after a VF VSI has been re-added/rebuild during reset. The PF driver
|
||||
* needs to re-apply the host configured Tx rate limiting configuration.
|
||||
*/
|
||||
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(vf->pf);
|
||||
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
|
||||
int err;
|
||||
|
||||
if (WARN_ON(!vsi))
|
||||
return -EINVAL;
|
||||
|
||||
if (vf->min_tx_rate) {
|
||||
err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
|
||||
vf->min_tx_rate, vf->vf_id, err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (vf->max_tx_rate) {
|
||||
err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
|
||||
vf->max_tx_rate, vf->vf_id, err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
|
||||
* @vf: VF to configure trust setting for
|
||||
*/
|
||||
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
|
||||
{
|
||||
assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
|
||||
* @vf: VF to add MAC filters for
|
||||
*
|
||||
* Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
|
||||
* always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
|
||||
*/
|
||||
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(vf->pf);
|
||||
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
|
||||
u8 broadcast[ETH_ALEN];
|
||||
int status;
|
||||
|
||||
if (WARN_ON(!vsi))
|
||||
return -EINVAL;
|
||||
|
||||
if (ice_is_eswitch_mode_switchdev(vf->pf))
|
||||
return 0;
|
||||
|
||||
eth_broadcast_addr(broadcast);
|
||||
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
|
||||
if (status) {
|
||||
dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
|
||||
vf->vf_id, status);
|
||||
return status;
|
||||
}
|
||||
|
||||
vf->num_mac++;
|
||||
|
||||
if (is_valid_ether_addr(vf->hw_lan_addr)) {
|
||||
status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
|
||||
ICE_FWD_TO_VSI);
|
||||
if (status) {
|
||||
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
|
||||
&vf->hw_lan_addr[0], vf->vf_id,
|
||||
status);
|
||||
return status;
|
||||
}
|
||||
vf->num_mac++;
|
||||
|
||||
ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
|
||||
* @vsi: Pointer to VSI
|
||||
*
|
||||
* This function moves VSI into corresponding scheduler aggregator node
|
||||
* based on cached value of "aggregator node info" per VSI
|
||||
*/
|
||||
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
int status;
|
||||
|
||||
if (!vsi->agg_node)
|
||||
return;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
|
||||
dev_dbg(dev,
|
||||
"agg_id %u already has reached max_num_vsis %u\n",
|
||||
vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
|
||||
return;
|
||||
}
|
||||
|
||||
status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
|
||||
vsi->idx, vsi->tc_cfg.ena_tc);
|
||||
if (status)
|
||||
dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
|
||||
vsi->idx, vsi->agg_node->agg_id);
|
||||
else
|
||||
vsi->agg_node->num_vsis++;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
|
||||
* @vf: VF to rebuild host configuration on
|
||||
*/
|
||||
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(vf->pf);
|
||||
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
|
||||
|
||||
if (WARN_ON(!vsi))
|
||||
return;
|
||||
|
||||
ice_vf_set_host_trust_cfg(vf);
|
||||
|
||||
if (ice_vf_rebuild_host_mac_cfg(vf))
|
||||
dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
|
||||
vf->vf_id);
|
||||
|
||||
if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
|
||||
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
|
||||
vf->vf_id);
|
||||
|
||||
if (ice_vf_rebuild_host_tx_rate_cfg(vf))
|
||||
dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
|
||||
vf->vf_id);
|
||||
|
||||
if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
|
||||
dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
|
||||
vf->vf_id);
|
||||
|
||||
/* rebuild aggregator node config for main VF VSI */
|
||||
ice_vf_rebuild_aggregator_node_cfg(vsi);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_vf_state_qs_dis - Set VF queues state to disabled
|
||||
* @vf: pointer to the VF structure
|
||||
*/
|
||||
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
|
||||
{
|
||||
/* Clear Rx/Tx enabled queues flag */
|
||||
bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
|
||||
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
|
||||
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_set_initialized - VF is ready for VIRTCHNL communication
|
||||
* @vf: VF to set in initialized state
|
||||
*
|
||||
* After this function the VF will be ready to receive/handle the
|
||||
* VIRTCHNL_OP_GET_VF_RESOURCES message
|
||||
*/
|
||||
static void ice_vf_set_initialized(struct ice_vf *vf)
|
||||
{
|
||||
ice_set_vf_state_qs_dis(vf);
|
||||
clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
|
||||
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
|
||||
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
|
||||
set_bit(ICE_VF_STATE_INIT, vf->vf_states);
|
||||
memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
|
||||
* @vf: the VF being reset
|
||||
|
@ -725,18 +956,6 @@ out_unlock:
|
|||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_vf_state_qs_dis - Set VF queues state to disabled
|
||||
* @vf: pointer to the VF structure
|
||||
*/
|
||||
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
|
||||
{
|
||||
/* Clear Rx/Tx enabled queues flag */
|
||||
bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
|
||||
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
|
||||
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_vf_state_dis - Set VF state to disabled
|
||||
* @vf: pointer to the VF structure
|
||||
|
@ -977,211 +1196,6 @@ bool ice_is_vf_link_up(struct ice_vf *vf)
|
|||
ICE_AQ_LINK_UP;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
|
||||
* @vf: VF to configure trust setting for
|
||||
*/
|
||||
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
|
||||
{
|
||||
if (vf->trusted)
|
||||
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
|
||||
else
|
||||
clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
|
||||
}
|
||||
|
||||
/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 *
 * Return: 0 on success (or when nothing needs doing), negative error code on
 * the first filter-add failure.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* in switchdev mode no filters are added here; MAC handling is done
	 * elsewhere (NOTE(review): presumably by the eswitch/representor
	 * path -- confirm against the switchdev setup code)
	 */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	/* broadcast filter first so the VF can always receive broadcasts */
	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	/* account for the broadcast filter in the VF's MAC filter count */
	vf->num_mac++;

	/* re-add the VF's own unicast address only if one is actually set */
	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* device address tracks the hardware address after rebuild */
		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}
|
||||
|
||||
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 *
 * Return: 0 on success, negative error code if configuring the port VLAN or
 * adding the VLAN filter fails. A failure to enable Rx VLAN filtering is
 * deliberately only warned about, not propagated.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* port VLAN must be written to the VSI parameters before the
		 * matching VLAN filter is added
		 */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		/* no port VLAN: untagged/VLAN-0 traffic must still pass */
		err = ice_vsi_add_vlan_zero(vsi);
	}

	/* shared error handling for both add_vlan paths above */
	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* best effort: warn but do not fail the rebuild if Rx VLAN
	 * filtering cannot be enabled
	 */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
|
||||
* @vf: VF to re-apply the configuration for
|
||||
*
|
||||
* Called after a VF VSI has been re-added/rebuild during reset. The PF driver
|
||||
* needs to re-apply the host configured Tx rate limiting configuration.
|
||||
*/
|
||||
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
|
||||
{
|
||||
struct device *dev = ice_pf_to_dev(vf->pf);
|
||||
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
|
||||
int err;
|
||||
|
||||
if (WARN_ON(!vsi))
|
||||
return -EINVAL;
|
||||
|
||||
if (vf->min_tx_rate) {
|
||||
err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
|
||||
vf->min_tx_rate, vf->vf_id, err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (vf->max_tx_rate) {
|
||||
err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
|
||||
vf->max_tx_rate, vf->vf_id, err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
|
||||
* @vsi: Pointer to VSI
|
||||
*
|
||||
* This function moves VSI into corresponding scheduler aggregator node
|
||||
* based on cached value of "aggregator node info" per VSI
|
||||
*/
|
||||
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct device *dev;
|
||||
int status;
|
||||
|
||||
if (!vsi->agg_node)
|
||||
return;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
|
||||
dev_dbg(dev,
|
||||
"agg_id %u already has reached max_num_vsis %u\n",
|
||||
vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
|
||||
return;
|
||||
}
|
||||
|
||||
status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
|
||||
vsi->idx, vsi->tc_cfg.ena_tc);
|
||||
if (status)
|
||||
dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
|
||||
vsi->idx, vsi->agg_node->agg_id);
|
||||
else
|
||||
vsi->agg_node->num_vsis++;
|
||||
}
|
||||
|
||||
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-apply all host-administered settings (trust, MAC filters, VLAN,
 * Tx rate limits, spoof checking, aggregator node placement) to the VF's
 * rebuilt VSI. Each step's failure is logged but does not abort the
 * remaining steps -- the rebuild is best effort.
 */
void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
|
||||
|
||||
/**
|
||||
* ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
|
||||
* @vf: VF that control VSI is being invalidated on
|
||||
|
@ -1310,23 +1324,6 @@ void ice_vf_vsi_release(struct ice_vf *vf)
|
|||
ice_vf_invalidate_vsi(vf);
|
||||
}
|
||||
|
||||
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* all queues start out disabled after (re)initialization */
	ice_set_vf_state_qs_dis(vf);
	/* drop any stale promiscuous/disabled state before flagging INIT */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* VLAN v2 capabilities must be renegotiated over VIRTCHNL */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
|
||||
|
||||
/**
|
||||
* ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
|
||||
* @pf: the PF private structure
|
||||
|
|
|
@ -32,13 +32,11 @@ int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
|
|||
bool ice_is_vf_trusted(struct ice_vf *vf);
|
||||
bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
|
||||
bool ice_is_vf_link_up(struct ice_vf *vf);
|
||||
void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
|
||||
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
|
||||
void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
|
||||
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
|
||||
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
|
||||
void ice_vf_invalidate_vsi(struct ice_vf *vf);
|
||||
void ice_vf_vsi_release(struct ice_vf *vf);
|
||||
void ice_vf_set_initialized(struct ice_vf *vf);
|
||||
|
||||
#endif /* _ICE_VF_LIB_PRIVATE_H_ */
|
||||
|
|
Loading…
Reference in New Issue