Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-07-23

This series contains updates to e1000e, igb, ixgbevf, i40e and i40evf.

Emil extends the reporting of the RSS key and hash table by adding support for x550 VFs.

Jia-Ju Bai fixes a QoS issue in e1000e where the error handling lacked a call to pm_qos_remove_request() to clean up the QoS request made in e1000_open().

Todd updates igb to report "unsupported" for ethtool coalesce settings the driver does not support, and converts the driver to use the ARRAY_SIZE() macro.

Carolyn fixes and refactors the dynamic ITR code for i40e and i40evf, which would never actually change dynamically: the switch() statement now has a default case and switches on "new_latency_range" instead of the current ITR setting.

Shannon cleans up i40e code, removing unneeded goto's, and clarifies error status messages that were causing confusion in PHY and FCoE setup error reports.

Mitch updates the virtual channel interface to prepare for the x722 device and other future devices, so that the VF driver can report what it is capable of supporting to the PF driver. He also updates the i40evf driver to handle resets such as Core or EMP resets, where the device is reinitialized and the VF will not get the same VSI.

Jesse updates the i40e and i40evf drivers to use the kernel BIT() and BIT_ULL() macros.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit a69e5a0dcf
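For readers skimming the diff below, the following is a minimal, standalone sketch of the 1 << n to BIT()/BIT_ULL() conversion that Jesse's patches apply throughout i40e/i40evf. It is not part of this series: the macro bodies mirror the kernel's <linux/bitops.h> helpers, and the EXAMPLE_* flag names are hypothetical, not actual driver symbols.

/*
 * Sketch only: BIT()/BIT_ULL() replace open-coded shifts such as
 * (u64)(1 << n), which overflow or sign-extend once n reaches 31 on a
 * 32-bit int.  Macro definitions mirror <linux/bitops.h>; flag names
 * are hypothetical examples.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(nr)         (1UL << (nr))
#define BIT_ULL(nr)     (1ULL << (nr))

#define EXAMPLE_FLAG_RX_CSUM_ENABLED    BIT_ULL(1)   /* was (u64)(1 << 1) */
#define EXAMPLE_FLAG_VEB_MODE_ENABLED   BIT_ULL(40)  /* (1 << 40) would overflow an int */

int main(void)
{
        uint64_t flags = EXAMPLE_FLAG_RX_CSUM_ENABLED |
                         EXAMPLE_FLAG_VEB_MODE_ENABLED;

        /* Testing a flag reads the same either way. */
        if (flags & EXAMPLE_FLAG_VEB_MODE_ENABLED)
                printf("flags = 0x%llx\n", (unsigned long long)flags);

        return 0;
}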
@@ -4588,6 +4588,7 @@ static int e1000_open(struct net_device *netdev)
 	return 0;
 
 err_req_irq:
+	pm_qos_remove_request(&adapter->pm_qos_req);
 	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter->rx_ring);
@@ -98,7 +98,7 @@
 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG	(1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
 
 #define I40E_NVM_VERSION_LO_SHIFT	0
 #define I40E_NVM_VERSION_LO_MASK	(0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +289,35 @@ struct i40e_pf {
 	struct work_struct service_task;
 
 	u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED		(u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED			(u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED			(u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED		(u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED			(u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED			(u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED			(u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT		(u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE		(u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED		BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED			BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED			(u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL			(u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	(u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ			(u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC			(u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT		(u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT		(u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED			(u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED			(u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED			(u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED		(u64)(1 << 22)
-#define I40E_FLAG_PTP				(u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED			(u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL			BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT		BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT		BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
+#define I40E_FLAG_PTP				BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
 #ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC		(u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC		BIT_ULL(27)
 #endif
-#define I40E_FLAG_PORT_ID_VALID			(u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE			(u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID			BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(29)
 #define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
 
 	/* tracks features that get auto disabled by errors */
@@ -443,8 +443,8 @@ struct i40e_vsi {
 
 	u32 current_netdev_flags;
 	unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED	(1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER		(1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED	BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
 	unsigned long flags;
 
 	struct list_head mac_filter_list;
@ -71,6 +71,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
|
|||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_str - convert AQ err code to a string
|
||||
* @hw: pointer to the HW structure
|
||||
* @aq_err: the AQ error code to convert
|
||||
**/
|
||||
char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
|
||||
{
|
||||
switch (aq_err) {
|
||||
case I40E_AQ_RC_OK:
|
||||
return "OK";
|
||||
case I40E_AQ_RC_EPERM:
|
||||
return "I40E_AQ_RC_EPERM";
|
||||
case I40E_AQ_RC_ENOENT:
|
||||
return "I40E_AQ_RC_ENOENT";
|
||||
case I40E_AQ_RC_ESRCH:
|
||||
return "I40E_AQ_RC_ESRCH";
|
||||
case I40E_AQ_RC_EINTR:
|
||||
return "I40E_AQ_RC_EINTR";
|
||||
case I40E_AQ_RC_EIO:
|
||||
return "I40E_AQ_RC_EIO";
|
||||
case I40E_AQ_RC_ENXIO:
|
||||
return "I40E_AQ_RC_ENXIO";
|
||||
case I40E_AQ_RC_E2BIG:
|
||||
return "I40E_AQ_RC_E2BIG";
|
||||
case I40E_AQ_RC_EAGAIN:
|
||||
return "I40E_AQ_RC_EAGAIN";
|
||||
case I40E_AQ_RC_ENOMEM:
|
||||
return "I40E_AQ_RC_ENOMEM";
|
||||
case I40E_AQ_RC_EACCES:
|
||||
return "I40E_AQ_RC_EACCES";
|
||||
case I40E_AQ_RC_EFAULT:
|
||||
return "I40E_AQ_RC_EFAULT";
|
||||
case I40E_AQ_RC_EBUSY:
|
||||
return "I40E_AQ_RC_EBUSY";
|
||||
case I40E_AQ_RC_EEXIST:
|
||||
return "I40E_AQ_RC_EEXIST";
|
||||
case I40E_AQ_RC_EINVAL:
|
||||
return "I40E_AQ_RC_EINVAL";
|
||||
case I40E_AQ_RC_ENOTTY:
|
||||
return "I40E_AQ_RC_ENOTTY";
|
||||
case I40E_AQ_RC_ENOSPC:
|
||||
return "I40E_AQ_RC_ENOSPC";
|
||||
case I40E_AQ_RC_ENOSYS:
|
||||
return "I40E_AQ_RC_ENOSYS";
|
||||
case I40E_AQ_RC_ERANGE:
|
||||
return "I40E_AQ_RC_ERANGE";
|
||||
case I40E_AQ_RC_EFLUSHED:
|
||||
return "I40E_AQ_RC_EFLUSHED";
|
||||
case I40E_AQ_RC_BAD_ADDR:
|
||||
return "I40E_AQ_RC_BAD_ADDR";
|
||||
case I40E_AQ_RC_EMODE:
|
||||
return "I40E_AQ_RC_EMODE";
|
||||
case I40E_AQ_RC_EFBIG:
|
||||
return "I40E_AQ_RC_EFBIG";
|
||||
}
|
||||
|
||||
snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
|
||||
return hw->err_str;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_stat_str - convert status err code to a string
|
||||
* @hw: pointer to the HW structure
|
||||
* @stat_err: the status error code to convert
|
||||
**/
|
||||
char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
|
||||
{
|
||||
switch (stat_err) {
|
||||
case 0:
|
||||
return "OK";
|
||||
case I40E_ERR_NVM:
|
||||
return "I40E_ERR_NVM";
|
||||
case I40E_ERR_NVM_CHECKSUM:
|
||||
return "I40E_ERR_NVM_CHECKSUM";
|
||||
case I40E_ERR_PHY:
|
||||
return "I40E_ERR_PHY";
|
||||
case I40E_ERR_CONFIG:
|
||||
return "I40E_ERR_CONFIG";
|
||||
case I40E_ERR_PARAM:
|
||||
return "I40E_ERR_PARAM";
|
||||
case I40E_ERR_MAC_TYPE:
|
||||
return "I40E_ERR_MAC_TYPE";
|
||||
case I40E_ERR_UNKNOWN_PHY:
|
||||
return "I40E_ERR_UNKNOWN_PHY";
|
||||
case I40E_ERR_LINK_SETUP:
|
||||
return "I40E_ERR_LINK_SETUP";
|
||||
case I40E_ERR_ADAPTER_STOPPED:
|
||||
return "I40E_ERR_ADAPTER_STOPPED";
|
||||
case I40E_ERR_INVALID_MAC_ADDR:
|
||||
return "I40E_ERR_INVALID_MAC_ADDR";
|
||||
case I40E_ERR_DEVICE_NOT_SUPPORTED:
|
||||
return "I40E_ERR_DEVICE_NOT_SUPPORTED";
|
||||
case I40E_ERR_MASTER_REQUESTS_PENDING:
|
||||
return "I40E_ERR_MASTER_REQUESTS_PENDING";
|
||||
case I40E_ERR_INVALID_LINK_SETTINGS:
|
||||
return "I40E_ERR_INVALID_LINK_SETTINGS";
|
||||
case I40E_ERR_AUTONEG_NOT_COMPLETE:
|
||||
return "I40E_ERR_AUTONEG_NOT_COMPLETE";
|
||||
case I40E_ERR_RESET_FAILED:
|
||||
return "I40E_ERR_RESET_FAILED";
|
||||
case I40E_ERR_SWFW_SYNC:
|
||||
return "I40E_ERR_SWFW_SYNC";
|
||||
case I40E_ERR_NO_AVAILABLE_VSI:
|
||||
return "I40E_ERR_NO_AVAILABLE_VSI";
|
||||
case I40E_ERR_NO_MEMORY:
|
||||
return "I40E_ERR_NO_MEMORY";
|
||||
case I40E_ERR_BAD_PTR:
|
||||
return "I40E_ERR_BAD_PTR";
|
||||
case I40E_ERR_RING_FULL:
|
||||
return "I40E_ERR_RING_FULL";
|
||||
case I40E_ERR_INVALID_PD_ID:
|
||||
return "I40E_ERR_INVALID_PD_ID";
|
||||
case I40E_ERR_INVALID_QP_ID:
|
||||
return "I40E_ERR_INVALID_QP_ID";
|
||||
case I40E_ERR_INVALID_CQ_ID:
|
||||
return "I40E_ERR_INVALID_CQ_ID";
|
||||
case I40E_ERR_INVALID_CEQ_ID:
|
||||
return "I40E_ERR_INVALID_CEQ_ID";
|
||||
case I40E_ERR_INVALID_AEQ_ID:
|
||||
return "I40E_ERR_INVALID_AEQ_ID";
|
||||
case I40E_ERR_INVALID_SIZE:
|
||||
return "I40E_ERR_INVALID_SIZE";
|
||||
case I40E_ERR_INVALID_ARP_INDEX:
|
||||
return "I40E_ERR_INVALID_ARP_INDEX";
|
||||
case I40E_ERR_INVALID_FPM_FUNC_ID:
|
||||
return "I40E_ERR_INVALID_FPM_FUNC_ID";
|
||||
case I40E_ERR_QP_INVALID_MSG_SIZE:
|
||||
return "I40E_ERR_QP_INVALID_MSG_SIZE";
|
||||
case I40E_ERR_QP_TOOMANY_WRS_POSTED:
|
||||
return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
|
||||
case I40E_ERR_INVALID_FRAG_COUNT:
|
||||
return "I40E_ERR_INVALID_FRAG_COUNT";
|
||||
case I40E_ERR_QUEUE_EMPTY:
|
||||
return "I40E_ERR_QUEUE_EMPTY";
|
||||
case I40E_ERR_INVALID_ALIGNMENT:
|
||||
return "I40E_ERR_INVALID_ALIGNMENT";
|
||||
case I40E_ERR_FLUSHED_QUEUE:
|
||||
return "I40E_ERR_FLUSHED_QUEUE";
|
||||
case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
|
||||
return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
|
||||
case I40E_ERR_INVALID_IMM_DATA_SIZE:
|
||||
return "I40E_ERR_INVALID_IMM_DATA_SIZE";
|
||||
case I40E_ERR_TIMEOUT:
|
||||
return "I40E_ERR_TIMEOUT";
|
||||
case I40E_ERR_OPCODE_MISMATCH:
|
||||
return "I40E_ERR_OPCODE_MISMATCH";
|
||||
case I40E_ERR_CQP_COMPL_ERROR:
|
||||
return "I40E_ERR_CQP_COMPL_ERROR";
|
||||
case I40E_ERR_INVALID_VF_ID:
|
||||
return "I40E_ERR_INVALID_VF_ID";
|
||||
case I40E_ERR_INVALID_HMCFN_ID:
|
||||
return "I40E_ERR_INVALID_HMCFN_ID";
|
||||
case I40E_ERR_BACKING_PAGE_ERROR:
|
||||
return "I40E_ERR_BACKING_PAGE_ERROR";
|
||||
case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
|
||||
return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
|
||||
case I40E_ERR_INVALID_PBLE_INDEX:
|
||||
return "I40E_ERR_INVALID_PBLE_INDEX";
|
||||
case I40E_ERR_INVALID_SD_INDEX:
|
||||
return "I40E_ERR_INVALID_SD_INDEX";
|
||||
case I40E_ERR_INVALID_PAGE_DESC_INDEX:
|
||||
return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
|
||||
case I40E_ERR_INVALID_SD_TYPE:
|
||||
return "I40E_ERR_INVALID_SD_TYPE";
|
||||
case I40E_ERR_MEMCPY_FAILED:
|
||||
return "I40E_ERR_MEMCPY_FAILED";
|
||||
case I40E_ERR_INVALID_HMC_OBJ_INDEX:
|
||||
return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
|
||||
case I40E_ERR_INVALID_HMC_OBJ_COUNT:
|
||||
return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
|
||||
case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
|
||||
return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
|
||||
case I40E_ERR_SRQ_ENABLED:
|
||||
return "I40E_ERR_SRQ_ENABLED";
|
||||
case I40E_ERR_ADMIN_QUEUE_ERROR:
|
||||
return "I40E_ERR_ADMIN_QUEUE_ERROR";
|
||||
case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
|
||||
return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
|
||||
case I40E_ERR_BUF_TOO_SHORT:
|
||||
return "I40E_ERR_BUF_TOO_SHORT";
|
||||
case I40E_ERR_ADMIN_QUEUE_FULL:
|
||||
return "I40E_ERR_ADMIN_QUEUE_FULL";
|
||||
case I40E_ERR_ADMIN_QUEUE_NO_WORK:
|
||||
return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
|
||||
case I40E_ERR_BAD_IWARP_CQE:
|
||||
return "I40E_ERR_BAD_IWARP_CQE";
|
||||
case I40E_ERR_NVM_BLANK_MODE:
|
||||
return "I40E_ERR_NVM_BLANK_MODE";
|
||||
case I40E_ERR_NOT_IMPLEMENTED:
|
||||
return "I40E_ERR_NOT_IMPLEMENTED";
|
||||
case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
|
||||
return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
|
||||
case I40E_ERR_DIAG_TEST_FAILED:
|
||||
return "I40E_ERR_DIAG_TEST_FAILED";
|
||||
case I40E_ERR_NOT_READY:
|
||||
return "I40E_ERR_NOT_READY";
|
||||
case I40E_NOT_SUPPORTED:
|
||||
return "I40E_NOT_SUPPORTED";
|
||||
case I40E_ERR_FIRMWARE_API_VERSION:
|
||||
return "I40E_ERR_FIRMWARE_API_VERSION";
|
||||
}
|
||||
|
||||
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
|
||||
return hw->err_str;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_debug_aq
|
||||
* @hw: debug mask related to admin queue
|
||||
|
@@ -1187,9 +1393,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 			blink = false;
 
 		if (blink)
-			gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
-			gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
 		break;
@@ -58,9 +58,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT	0
 #define I40E_IEEE_ETS_MAXTC_MASK	(0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT		6
-#define I40E_IEEE_ETS_CBS_MASK		(0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK		BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT	7
-#define I40E_IEEE_ETS_WILLING_MASK	(0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK	BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT	0
 #define I40E_IEEE_ETS_PRIO_0_MASK	(0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT	4
@@ -79,9 +79,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT		0
 #define I40E_IEEE_PFC_CAP_MASK		(0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT		6
-#define I40E_IEEE_PFC_MBC_MASK		(0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK		BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT	7
-#define I40E_IEEE_PFC_WILLING_MASK	(0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK	BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT		0
@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
 	/* Set up all the App TLVs if DCBx is negotiated */
 	for (i = 0; i < dcbxcfg->numapps; i++) {
 		prio = dcbxcfg->app[i].priority;
-		tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+		tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
 		/* Add APP only if the TC is enabled for this VSI */
 		if (tc_map & vsi->tc_config.enabled_tc) {
@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
 		pf->auto_disable_flags |= flag;
 	}
 	dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+	i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 }
 
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		}
 	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "empr", 4) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
 	if (!ret_code &&
 	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-		ret_code = i40e_validate_nvm_checksum(hw, NULL);
-	} else {
-		ret_code = I40E_ERR_DIAG_TEST_FAILED;
-	}
-
-	return ret_code;
+	     BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+		return i40e_validate_nvm_checksum(hw, NULL);
+	else
+		return I40E_ERR_DIAG_TEST_FAILED;
 }
@ -681,15 +681,17 @@ static int i40e_set_settings(struct net_device *netdev,
|
|||
/* make the aq call */
|
||||
status = i40e_aq_set_phy_config(hw, &config, NULL);
|
||||
if (status) {
|
||||
netdev_info(netdev, "Set phy config failed with error %d.\n",
|
||||
status);
|
||||
netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
|
||||
i40e_stat_str(hw, status),
|
||||
i40e_aq_str(hw, hw->aq.asq_last_status));
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
|
||||
if (status)
|
||||
netdev_info(netdev, "Updating link info failed with error %d\n",
|
||||
status);
|
||||
netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
|
||||
i40e_stat_str(hw, status),
|
||||
i40e_aq_str(hw, hw->aq.asq_last_status));
|
||||
|
||||
} else {
|
||||
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
|
||||
|
@ -709,8 +711,9 @@ static int i40e_nway_reset(struct net_device *netdev)
|
|||
|
||||
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
|
||||
if (ret) {
|
||||
netdev_info(netdev, "link restart failed, aq_err=%d\n",
|
||||
pf->hw.aq.asq_last_status);
|
||||
netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
|
||||
i40e_stat_str(hw, ret),
|
||||
i40e_aq_str(hw, hw->aq.asq_last_status));
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -822,18 +825,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
|
|||
status = i40e_set_fc(hw, &aq_failures, link_up);
|
||||
|
||||
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
|
||||
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
|
||||
status, hw->aq.asq_last_status);
|
||||
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
|
||||
i40e_stat_str(hw, status),
|
||||
i40e_aq_str(hw, hw->aq.asq_last_status));
|
||||
err = -EAGAIN;
|
||||
}
|
||||
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
|
||||
netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
|
||||
status, hw->aq.asq_last_status);
|
||||
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
|
||||
i40e_stat_str(hw, status),
|
||||
i40e_aq_str(hw, hw->aq.asq_last_status));
|
||||
err = -EAGAIN;
|
||||
}
|
||||
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
|
||||
netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
|
||||
status, hw->aq.asq_last_status);
|
||||
netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
|
||||
i40e_stat_str(hw, status),
|
||||
i40e_aq_str(hw, hw->aq.asq_last_status));
|
||||
err = -EAGAIN;
|
||||
}
|
||||
|
||||
|
@ -1011,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
|
|||
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
|
||||
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
|
||||
/* register returns value in power of 2, 64Kbyte chunks. */
|
||||
val = (64 * 1024) * (1 << val);
|
||||
val = (64 * 1024) * BIT(val);
|
||||
return val;
|
||||
}
|
||||
|
||||
|
@ -1464,11 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev,
|
|||
else
|
||||
info->phc_index = -1;
|
||||
|
||||
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
|
||||
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
|
||||
|
||||
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
|
||||
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
|
||||
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
|
||||
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
|
||||
BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
|
||||
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1584,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev,
|
|||
/* indicate we're in test mode */
|
||||
dev_close(netdev);
|
||||
else
|
||||
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
|
||||
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
|
||||
|
||||
/* Link test performed before hardware reset
|
||||
* so autoneg doesn't interfere with test result
|
||||
|
@ -1606,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev,
|
|||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
|
||||
clear_bit(__I40E_TESTING, &pf->state);
|
||||
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
|
||||
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
|
||||
|
||||
if (if_running)
|
||||
dev_open(netdev);
|
||||
|
@ -1639,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev,
|
|||
|
||||
/* NVM bit on means WoL disabled for the port */
|
||||
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
|
||||
if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
|
||||
if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
|
||||
wol->supported = 0;
|
||||
wol->wolopts = 0;
|
||||
} else {
|
||||
|
@ -1672,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|||
|
||||
/* NVM bit on means WoL disabled for the port */
|
||||
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
|
||||
if (((1 << hw->port) & wol_nvm_bits))
|
||||
if (BIT(hw->port) & wol_nvm_bits)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* only magic packet is supported */
|
||||
|
@ -2018,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
|||
case TCP_V4_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -2030,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
|||
case TCP_V6_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -2042,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
|||
case UDP_V4_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -2056,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
|||
case UDP_V6_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -2074,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
|||
if ((nfc->data & RXH_L4_B_0_1) ||
|
||||
(nfc->data & RXH_L4_B_2_3))
|
||||
return -EINVAL;
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
|
||||
break;
|
||||
case AH_ESP_V6_FLOW:
|
||||
case AH_V6_FLOW:
|
||||
|
@ -2083,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
|||
if ((nfc->data & RXH_L4_B_0_1) ||
|
||||
(nfc->data & RXH_L4_B_2_3))
|
||||
return -EINVAL;
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
|
||||
break;
|
||||
case IPV4_FLOW:
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
|
||||
break;
|
||||
case IPV6_FLOW:
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
|
|
@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
|
|||
|
||||
/* enable FCoE hash filter */
|
||||
val = rd32(hw, I40E_PFQF_HENA(1));
|
||||
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
|
||||
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
|
||||
val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
|
||||
val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
|
||||
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
|
||||
wr32(hw, I40E_PFQF_HENA(1), val);
|
||||
|
||||
|
@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
|
|||
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
|
||||
|
||||
/* Reserve 4K DDP contexts and 20K filter size for FCoE */
|
||||
pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
|
||||
I40E_DMA_CNTX_BASE_SIZE;
|
||||
pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
|
||||
I40E_DMA_CNTX_BASE_SIZE;
|
||||
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
|
||||
(1 << I40E_HASH_FILTER_SIZE_16K) *
|
||||
BIT(I40E_HASH_FILTER_SIZE_16K) *
|
||||
I40E_HASH_FILTER_BASE_SIZE;
|
||||
|
||||
/* FCoE object: max 16K filter buckets and 4K DMA contexts */
|
||||
|
@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
|
|||
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
|
||||
app.protocolid == ETH_P_FCOE) {
|
||||
tc = dcbcfg->etscfg.prioritytable[app.priority];
|
||||
enabled_tc |= (1 << tc);
|
||||
enabled_tc |= BIT(tc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -59,9 +59,9 @@
|
|||
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
|
||||
|
||||
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
|
||||
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
|
||||
BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
|
||||
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
|
||||
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
|
||||
BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
|
||||
|
||||
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
|
||||
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
|
||||
|
|
|
@ -297,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
|
|||
u32 idx, bool is_pf)
|
||||
{
|
||||
struct i40e_hmc_sd_entry *sd_entry;
|
||||
i40e_status ret_code = 0;
|
||||
|
||||
if (!is_pf)
|
||||
return I40E_NOT_SUPPORTED;
|
||||
|
||||
/* get the entry and decrease its ref counter */
|
||||
sd_entry = &hmc_info->sd_table.sd_entry[idx];
|
||||
if (is_pf) {
|
||||
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
|
||||
} else {
|
||||
ret_code = I40E_NOT_SUPPORTED;
|
||||
goto exit;
|
||||
}
|
||||
ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
|
||||
if (ret_code)
|
||||
goto exit;
|
||||
exit:
|
||||
return ret_code;
|
||||
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
|
||||
|
||||
return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -351,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
|
|||
struct i40e_hmc_info *hmc_info,
|
||||
u32 idx, bool is_pf)
|
||||
{
|
||||
i40e_status ret_code = 0;
|
||||
struct i40e_hmc_sd_entry *sd_entry;
|
||||
|
||||
if (!is_pf)
|
||||
return I40E_NOT_SUPPORTED;
|
||||
|
||||
sd_entry = &hmc_info->sd_table.sd_entry[idx];
|
||||
if (is_pf) {
|
||||
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
|
||||
} else {
|
||||
ret_code = I40E_NOT_SUPPORTED;
|
||||
goto exit;
|
||||
}
|
||||
/* free memory here */
|
||||
ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
|
||||
if (ret_code)
|
||||
goto exit;
|
||||
exit:
|
||||
return ret_code;
|
||||
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
|
||||
|
||||
return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
|
||||
}
|
||||
|
|
|
@ -127,8 +127,8 @@ struct i40e_hmc_info {
|
|||
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
|
||||
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
|
||||
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
|
||||
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
|
||||
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
|
||||
BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
|
||||
val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
|
||||
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
|
||||
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
|
||||
wr32((hw), I40E_PFHMC_SDCMD, val3); \
|
||||
|
@ -147,7 +147,7 @@ struct i40e_hmc_info {
|
|||
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
|
||||
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
|
||||
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
|
||||
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
|
||||
val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
|
||||
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
|
||||
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
|
||||
wr32((hw), I40E_PFHMC_SDCMD, val3); \
|
||||
|
|
|
@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||
obj->cnt = txq_num;
|
||||
obj->base = 0;
|
||||
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
|
||||
obj->size = (u64)1 << size_exp;
|
||||
obj->size = BIT_ULL(size_exp);
|
||||
|
||||
/* validate values requested by driver don't exceed HMC capacity */
|
||||
if (txq_num > obj->max_cnt) {
|
||||
|
@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
|
||||
obj->base = i40e_align_l2obj_base(obj->base);
|
||||
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
|
||||
obj->size = (u64)1 << size_exp;
|
||||
obj->size = BIT_ULL(size_exp);
|
||||
|
||||
/* validate values requested by driver don't exceed HMC capacity */
|
||||
if (rxq_num > obj->max_cnt) {
|
||||
|
@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
|
||||
obj->base = i40e_align_l2obj_base(obj->base);
|
||||
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
|
||||
obj->size = (u64)1 << size_exp;
|
||||
obj->size = BIT_ULL(size_exp);
|
||||
|
||||
/* validate values requested by driver don't exceed HMC capacity */
|
||||
if (fcoe_cntx_num > obj->max_cnt) {
|
||||
|
@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
|
||||
obj->base = i40e_align_l2obj_base(obj->base);
|
||||
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
|
||||
obj->size = (u64)1 << size_exp;
|
||||
obj->size = BIT_ULL(size_exp);
|
||||
|
||||
/* validate values requested by driver don't exceed HMC capacity */
|
||||
if (fcoe_filt_num > obj->max_cnt) {
|
||||
|
@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits,
|
|||
|
||||
/* prepare the bits and mask */
|
||||
shift_width = ce_info->lsb % 8;
|
||||
mask = ((u8)1 << ce_info->width) - 1;
|
||||
mask = BIT(ce_info->width) - 1;
|
||||
|
||||
src_byte = *from;
|
||||
src_byte &= mask;
|
||||
|
@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits,
|
|||
|
||||
/* prepare the bits and mask */
|
||||
shift_width = ce_info->lsb % 8;
|
||||
mask = ((u16)1 << ce_info->width) - 1;
|
||||
mask = BIT(ce_info->width) - 1;
|
||||
|
||||
/* don't swizzle the bits until after the mask because the mask bits
|
||||
* will be in a different bit position on big endian machines
|
||||
|
@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits,
|
|||
* to 5 bits so the shift will do nothing
|
||||
*/
|
||||
if (ce_info->width < 32)
|
||||
mask = ((u32)1 << ce_info->width) - 1;
|
||||
mask = BIT(ce_info->width) - 1;
|
||||
else
|
||||
mask = ~(u32)0;
|
||||
|
||||
|
@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits,
|
|||
* to 6 bits so the shift will do nothing
|
||||
*/
|
||||
if (ce_info->width < 64)
|
||||
mask = ((u64)1 << ce_info->width) - 1;
|
||||
mask = BIT_ULL(ce_info->width) - 1;
|
||||
else
|
||||
mask = ~(u64)0;
|
||||
|
||||
|
|
File diff suppressed because it is too large
@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
|
|||
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
|
||||
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
|
||||
/* Switching to words (sr_size contains power of 2KB) */
|
||||
nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
|
||||
nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
|
||||
|
||||
/* Check if we are in the normal or blank NVM programming mode */
|
||||
fla = rd32(hw, I40E_GLNVM_FLA);
|
||||
|
@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
|
|||
ret_code = i40e_poll_sr_srctl_done_bit(hw);
|
||||
if (!ret_code) {
|
||||
/* Write the address and start reading */
|
||||
sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
|
||||
(1 << I40E_GLNVM_SRCTL_START_SHIFT);
|
||||
sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
|
||||
BIT(I40E_GLNVM_SRCTL_START_SHIFT);
|
||||
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
|
||||
|
||||
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
|
||||
|
|
|
@@ -58,6 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
@ -43,9 +43,8 @@
|
|||
#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
|
||||
#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
|
||||
|
||||
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
|
||||
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
|
||||
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
|
||||
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
|
||||
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
|
||||
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
|
||||
|
||||
/**
|
||||
|
@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
|
|||
|
||||
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
|
||||
|
||||
if (!(prttsyn_stat & (1 << index)))
|
||||
if (!(prttsyn_stat & BIT(index)))
|
||||
return;
|
||||
|
||||
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
|
||||
|
|
|
@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
|
|||
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
|
||||
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
|
||||
|
||||
if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
|
||||
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
|
||||
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
|
||||
(I40E_DEBUG_FD & pf->hw.debug_mask))
|
||||
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
|
||||
|
@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
|
|||
dev_info(&pdev->dev,
|
||||
"FD filter programming failed due to incorrect filter parameters\n");
|
||||
}
|
||||
} else if (error ==
|
||||
(0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
|
||||
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
|
||||
if (I40E_DEBUG_FD & pf->hw.debug_mask)
|
||||
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
|
||||
rx_desc->wb.qword0.hi_dword.fd_id);
|
||||
|
@@ -892,7 +891,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +904,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@ -923,41 +927,13 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
|
|||
break;
|
||||
}
|
||||
|
||||
if (new_itr != rc->itr) {
|
||||
/* do an exponential smoothing */
|
||||
new_itr = (10 * new_itr * rc->itr) /
|
||||
((9 * new_itr) + rc->itr);
|
||||
rc->itr = new_itr & I40E_MAX_ITR;
|
||||
}
|
||||
if (new_itr != rc->itr)
|
||||
rc->itr = new_itr;
|
||||
|
||||
rc->total_bytes = 0;
|
||||
rc->total_packets = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_update_dynamic_itr - Adjust ITR based on bytes per int
|
||||
* @q_vector: the vector to adjust
|
||||
**/
|
||||
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
|
||||
{
|
||||
u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
|
||||
struct i40e_hw *hw = &q_vector->vsi->back->hw;
|
||||
u32 reg_addr;
|
||||
u16 old_itr;
|
||||
|
||||
reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
|
||||
old_itr = q_vector->rx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->rx);
|
||||
if (old_itr != q_vector->rx.itr)
|
||||
wr32(hw, reg_addr, q_vector->rx.itr);
|
||||
|
||||
reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
|
||||
old_itr = q_vector->tx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->tx);
|
||||
if (old_itr != q_vector->tx.itr)
|
||||
wr32(hw, reg_addr, q_vector->tx.itr);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_clean_programming_status - clean the programming status descriptor
|
||||
* @rx_ring: the rx ring that has this descriptor
|
||||
|
@ -1386,7 +1362,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
|
|||
return;
|
||||
|
||||
/* did the hardware decode the packet and checksum? */
|
||||
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
|
||||
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
|
||||
return;
|
||||
|
||||
/* both known and outer_ip must be set for the below code to work */
|
||||
|
@ -1401,25 +1377,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
|
|||
ipv6 = true;
|
||||
|
||||
if (ipv4 &&
|
||||
(rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
|
||||
(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
|
||||
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
|
||||
BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
|
||||
goto checksum_fail;
|
||||
|
||||
/* likely incorrect csum if alternate IP extension headers found */
|
||||
if (ipv6 &&
|
||||
rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
|
||||
rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
|
||||
/* don't increment checksum err here, non-fatal err */
|
||||
return;
|
||||
|
||||
/* there was some L4 error, count error and punt packet to the stack */
|
||||
if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
|
||||
if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
|
||||
goto checksum_fail;
|
||||
|
||||
/* handle packets that were not able to be checksummed due
|
||||
* to arrival speed, in this case the stack can compute
|
||||
* the csum.
|
||||
*/
|
||||
if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
|
||||
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
|
||||
return;
|
||||
|
||||
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
|
||||
|
@ -1543,7 +1519,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
|
||||
I40E_RXD_QW1_STATUS_SHIFT;
|
||||
|
||||
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
break;
|
||||
|
||||
/* This memory barrier is needed to keep us from reading
|
||||
|
@ -1584,8 +1560,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
|
||||
I40E_RXD_QW1_ERROR_SHIFT;
|
||||
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
|
||||
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
|
||||
I40E_RXD_QW1_PTYPE_SHIFT;
|
||||
|
@ -1637,7 +1613,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
I40E_RX_INCREMENT(rx_ring, i);
|
||||
|
||||
if (unlikely(
|
||||
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
!(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
struct i40e_rx_buffer *next_buffer;
|
||||
|
||||
next_buffer = &rx_ring->rx_bi[i];
|
||||
|
@ -1647,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
}
|
||||
|
||||
/* ERR_MASK will only have valid bits if EOP set */
|
||||
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
dev_kfree_skb_any(skb);
|
||||
continue;
|
||||
}
|
||||
|
@ -1669,7 +1645,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
|
||||
|
||||
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
|
||||
: 0;
|
||||
#ifdef I40E_FCOE
|
||||
|
@ -1730,7 +1706,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
|
||||
I40E_RXD_QW1_STATUS_SHIFT;
|
||||
|
||||
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
break;
|
||||
|
||||
/* This memory barrier is needed to keep us from reading
|
||||
|
@ -1753,7 +1729,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
|
||||
I40E_RXD_QW1_ERROR_SHIFT;
|
||||
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
|
||||
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
|
||||
I40E_RXD_QW1_PTYPE_SHIFT;
|
||||
|
@ -1771,13 +1747,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
I40E_RX_INCREMENT(rx_ring, i);
|
||||
|
||||
if (unlikely(
|
||||
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
!(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
rx_ring->rx_stats.non_eop_descs++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* ERR_MASK will only have valid bits if EOP set */
|
||||
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
dev_kfree_skb_any(skb);
|
||||
/* TODO: shouldn't we increment a counter indicating the
|
||||
* drop?
|
||||
|
@ -1802,7 +1778,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
|
||||
|
||||
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
|
||||
: 0;
|
||||
#ifdef I40E_FCOE
|
||||
|
@ -1826,6 +1802,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
return total_rx_packets;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
|
||||
* @vsi: the VSI we care about
|
||||
* @q_vector: q_vector for which itr is being updated and interrupt enabled
|
||||
*
|
||||
**/
|
||||
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
|
||||
struct i40e_q_vector *q_vector)
|
||||
{
|
||||
struct i40e_hw *hw = &vsi->back->hw;
|
||||
u16 old_itr;
|
||||
int vector;
|
||||
u32 val;
|
||||
|
||||
vector = (q_vector->v_idx + vsi->base_vector);
|
||||
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
|
||||
old_itr = q_vector->rx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->rx);
|
||||
if (old_itr != q_vector->rx.itr) {
|
||||
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_RX_ITR <<
|
||||
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
|
||||
(q_vector->rx.itr <<
|
||||
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
|
||||
} else {
|
||||
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_ITR_NONE <<
|
||||
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
|
||||
}
|
||||
if (!test_bit(__I40E_DOWN, &vsi->state))
|
||||
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
|
||||
} else {
|
||||
i40e_irq_dynamic_enable(vsi,
|
||||
q_vector->v_idx + vsi->base_vector);
|
||||
}
|
||||
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
|
||||
old_itr = q_vector->tx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->tx);
|
||||
if (old_itr != q_vector->tx.itr) {
|
||||
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_TX_ITR <<
|
||||
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
|
||||
(q_vector->tx.itr <<
|
||||
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
|
||||
} else {
|
||||
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_ITR_NONE <<
|
||||
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
|
||||
}
|
||||
if (!test_bit(__I40E_DOWN, &vsi->state))
|
||||
wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
|
||||
vsi->base_vector - 1), val);
|
||||
} else {
|
||||
i40e_irq_dynamic_enable(vsi,
|
||||
q_vector->v_idx + vsi->base_vector);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
|
||||
* @napi: napi struct with our devices info in it
|
||||
|
@ -1882,33 +1920,24 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
|
|||
|
||||
/* Work is done so exit the polling mode and re-enable the interrupt */
|
||||
napi_complete(napi);
|
||||
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
|
||||
ITR_IS_DYNAMIC(vsi->tx_itr_setting))
|
||||
i40e_update_dynamic_itr(q_vector);
|
||||
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
|
||||
i40e_update_enable_itr(vsi, q_vector);
|
||||
} else { /* Legacy mode */
|
||||
struct i40e_hw *hw = &vsi->back->hw;
|
||||
/* We re-enable the queue 0 cause, but
|
||||
* don't worry about dynamic_enable
|
||||
* because we left it on for the other
|
||||
* possible interrupts during napi
|
||||
*/
|
||||
u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
|
||||
I40E_QINT_RQCTL_CAUSE_ENA_MASK;
|
||||
|
||||
if (!test_bit(__I40E_DOWN, &vsi->state)) {
|
||||
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
|
||||
i40e_irq_dynamic_enable(vsi,
|
||||
q_vector->v_idx + vsi->base_vector);
|
||||
} else {
|
||||
struct i40e_hw *hw = &vsi->back->hw;
|
||||
/* We re-enable the queue 0 cause, but
|
||||
* don't worry about dynamic_enable
|
||||
* because we left it on for the other
|
||||
* possible interrupts during napi
|
||||
*/
|
||||
u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
|
||||
qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
|
||||
wr32(hw, I40E_QINT_RQCTL(0), qval);
|
||||
|
||||
qval = rd32(hw, I40E_QINT_TQCTL(0));
|
||||
qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
|
||||
wr32(hw, I40E_QINT_TQCTL(0), qval);
|
||||
|
||||
i40e_irq_dynamic_enable_icr0(vsi->back);
|
||||
}
|
||||
wr32(hw, I40E_QINT_RQCTL(0), qval);
|
||||
qval = rd32(hw, I40E_QINT_TQCTL(0)) |
|
||||
I40E_QINT_TQCTL_CAUSE_ENA_MASK;
|
||||
wr32(hw, I40E_QINT_TQCTL(0), qval);
|
||||
i40e_irq_dynamic_enable_icr0(vsi->back);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
|
|||
|
||||
/* Supported RSS offloads */
|
||||
#define I40E_DEFAULT_RSS_HENA ( \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
|
||||
|
||||
/* Supported Rx Buffer Sizes */
|
||||
#define I40E_RXBUFFER_512 512 /* Used for packet split */
|
||||
|
@ -129,17 +129,17 @@ enum i40e_dyn_idx_t {
|
|||
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
|
||||
#define I40E_MIN_DESC_PENDING 4
|
||||
|
||||
#define I40E_TX_FLAGS_CSUM (u32)(1)
|
||||
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
|
||||
#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
|
||||
#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
|
||||
#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
|
||||
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
|
||||
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
|
||||
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
|
||||
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
|
||||
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
|
||||
#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
|
||||
#define I40E_TX_FLAGS_CSUM BIT(0)
|
||||
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
|
||||
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
|
||||
#define I40E_TX_FLAGS_TSO BIT(3)
|
||||
#define I40E_TX_FLAGS_IPV4 BIT(4)
|
||||
#define I40E_TX_FLAGS_IPV6 BIT(5)
|
||||
#define I40E_TX_FLAGS_FCCRC BIT(6)
|
||||
#define I40E_TX_FLAGS_FSO BIT(7)
|
||||
#define I40E_TX_FLAGS_TSYN BIT(8)
|
||||
#define I40E_TX_FLAGS_FD_SB BIT(9)
|
||||
#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
|
||||
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
|
||||
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
|
||||
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
|
||||
|
|
|
@ -497,6 +497,7 @@ struct i40e_hw {
|
|||
|
||||
/* debug mask */
|
||||
u32 debug_mask;
|
||||
char err_str[16];
|
||||
};
|
||||
|
||||
static inline bool i40e_is_vf(struct i40e_hw *hw)
|
||||
|
@ -610,7 +611,7 @@ enum i40e_rx_desc_status_bits {
|
|||
};
|
||||
|
||||
#define I40E_RXD_QW1_STATUS_SHIFT 0
|
||||
#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
|
||||
#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
|
||||
<< I40E_RXD_QW1_STATUS_SHIFT)
|
||||
|
||||
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
|
||||
|
@ -618,8 +619,8 @@ enum i40e_rx_desc_status_bits {
|
|||
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
|
||||
|
||||
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
|
||||
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
|
||||
I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
|
||||
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
|
||||
BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
|
||||
|
||||
enum i40e_rx_desc_fltstat_values {
|
||||
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
|
||||
|
@ -753,8 +754,7 @@ enum i40e_rx_ptype_payload_layer {
|
|||
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
|
||||
|
||||
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
|
||||
#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
|
||||
I40E_RXD_QW1_LENGTH_SPH_SHIFT)
|
||||
#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
|
||||
|
||||
enum i40e_rx_desc_ext_status_bits {
|
||||
/* Note: These are predefined bit offsets */
|
||||
|
@ -930,12 +930,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
|
|||
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
|
||||
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
|
||||
#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
|
||||
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
|
||||
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
|
||||
I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
|
||||
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
|
||||
BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
|
||||
|
||||
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
|
||||
|
||||
|
@ -1000,8 +1000,8 @@ enum i40e_filter_program_desc_fd_status {
|
|||
};
|
||||
|
||||
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
|
||||
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
|
||||
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
|
||||
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
|
||||
BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
|
||||
|
||||
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
|
||||
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
|
||||
|
@ -1019,8 +1019,7 @@ enum i40e_filter_program_desc_pcmd {
|
|||
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
|
||||
|
||||
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
|
||||
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
|
||||
I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
|
||||
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
|
||||
|
||||
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
|
||||
I40E_TXD_FLTR_QW1_CMD_SHIFT)
|
||||
|
|
|
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
 
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
|
|||
u8 default_mac_addr[ETH_ALEN];
|
||||
};
|
||||
/* VF offload flags */
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
|
||||
|
||||
struct i40e_virtchnl_vf_resource {
|
||||
u16 num_vsis;
|
||||
|
|
|
@@ -277,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
}
tempmap = vecmap->rxq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
}

tempmap = vecmap->txq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ 1));
linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id + 1));
}

next_q = find_first_bit(&linklistmap,
@@ -332,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
reg = (vector_id) |
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
wr32(hw, reg_idx, reg);
}
@@ -897,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
for (vf_id = 0; vf_id < tmp; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -1121,12 +1119,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
*
* called from the VF to request the API version used by the PF
**/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_version_info info = {
I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
};

vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
if (VF_IS_V10(vf))
info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
I40E_SUCCESS, (u8 *)&info,
sizeof(struct
@@ -1141,7 +1143,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
*
* called from the VF to request its resources
**/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
@@ -1165,11 +1167,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
len = 0;
goto err;
}
if (VF_IS_V11(vf))
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;

vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
@@ -1771,9 +1780,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(vf))
valid_len = sizeof(u32);
else
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
break;
@@ -1886,10 +1900,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,

switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
ret = i40e_vc_get_version_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf);
ret = i40e_vc_get_vf_resources_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
@@ -1967,9 +1981,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & (1 << bit_idx)) {
if (reg & BIT(bit_idx)) {
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));

if (!test_bit(__I40E_DOWN, &pf->state))
i40e_reset_vf(vf, true);

@@ -42,6 +42,9 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000

#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))

/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@ struct i40e_vf {
u16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
struct i40e_virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */

/* VF Port Extender (PE) stag if used */
u16 stag;

@ -71,6 +71,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
|
|||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_aq_str - convert AQ err code to a string
|
||||
* @hw: pointer to the HW structure
|
||||
* @aq_err: the AQ error code to convert
|
||||
**/
|
||||
char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
|
||||
{
|
||||
switch (aq_err) {
|
||||
case I40E_AQ_RC_OK:
|
||||
return "OK";
|
||||
case I40E_AQ_RC_EPERM:
|
||||
return "I40E_AQ_RC_EPERM";
|
||||
case I40E_AQ_RC_ENOENT:
|
||||
return "I40E_AQ_RC_ENOENT";
|
||||
case I40E_AQ_RC_ESRCH:
|
||||
return "I40E_AQ_RC_ESRCH";
|
||||
case I40E_AQ_RC_EINTR:
|
||||
return "I40E_AQ_RC_EINTR";
|
||||
case I40E_AQ_RC_EIO:
|
||||
return "I40E_AQ_RC_EIO";
|
||||
case I40E_AQ_RC_ENXIO:
|
||||
return "I40E_AQ_RC_ENXIO";
|
||||
case I40E_AQ_RC_E2BIG:
|
||||
return "I40E_AQ_RC_E2BIG";
|
||||
case I40E_AQ_RC_EAGAIN:
|
||||
return "I40E_AQ_RC_EAGAIN";
|
||||
case I40E_AQ_RC_ENOMEM:
|
||||
return "I40E_AQ_RC_ENOMEM";
|
||||
case I40E_AQ_RC_EACCES:
|
||||
return "I40E_AQ_RC_EACCES";
|
||||
case I40E_AQ_RC_EFAULT:
|
||||
return "I40E_AQ_RC_EFAULT";
|
||||
case I40E_AQ_RC_EBUSY:
|
||||
return "I40E_AQ_RC_EBUSY";
|
||||
case I40E_AQ_RC_EEXIST:
|
||||
return "I40E_AQ_RC_EEXIST";
|
||||
case I40E_AQ_RC_EINVAL:
|
||||
return "I40E_AQ_RC_EINVAL";
|
||||
case I40E_AQ_RC_ENOTTY:
|
||||
return "I40E_AQ_RC_ENOTTY";
|
||||
case I40E_AQ_RC_ENOSPC:
|
||||
return "I40E_AQ_RC_ENOSPC";
|
||||
case I40E_AQ_RC_ENOSYS:
|
||||
return "I40E_AQ_RC_ENOSYS";
|
||||
case I40E_AQ_RC_ERANGE:
|
||||
return "I40E_AQ_RC_ERANGE";
|
||||
case I40E_AQ_RC_EFLUSHED:
|
||||
return "I40E_AQ_RC_EFLUSHED";
|
||||
case I40E_AQ_RC_BAD_ADDR:
|
||||
return "I40E_AQ_RC_BAD_ADDR";
|
||||
case I40E_AQ_RC_EMODE:
|
||||
return "I40E_AQ_RC_EMODE";
|
||||
case I40E_AQ_RC_EFBIG:
|
||||
return "I40E_AQ_RC_EFBIG";
|
||||
}
|
||||
|
||||
snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
|
||||
return hw->err_str;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_stat_str - convert status err code to a string
|
||||
* @hw: pointer to the HW structure
|
||||
* @stat_err: the status error code to convert
|
||||
**/
|
||||
char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
|
||||
{
|
||||
switch (stat_err) {
|
||||
case 0:
|
||||
return "OK";
|
||||
case I40E_ERR_NVM:
|
||||
return "I40E_ERR_NVM";
|
||||
case I40E_ERR_NVM_CHECKSUM:
|
||||
return "I40E_ERR_NVM_CHECKSUM";
|
||||
case I40E_ERR_PHY:
|
||||
return "I40E_ERR_PHY";
|
||||
case I40E_ERR_CONFIG:
|
||||
return "I40E_ERR_CONFIG";
|
||||
case I40E_ERR_PARAM:
|
||||
return "I40E_ERR_PARAM";
|
||||
case I40E_ERR_MAC_TYPE:
|
||||
return "I40E_ERR_MAC_TYPE";
|
||||
case I40E_ERR_UNKNOWN_PHY:
|
||||
return "I40E_ERR_UNKNOWN_PHY";
|
||||
case I40E_ERR_LINK_SETUP:
|
||||
return "I40E_ERR_LINK_SETUP";
|
||||
case I40E_ERR_ADAPTER_STOPPED:
|
||||
return "I40E_ERR_ADAPTER_STOPPED";
|
||||
case I40E_ERR_INVALID_MAC_ADDR:
|
||||
return "I40E_ERR_INVALID_MAC_ADDR";
|
||||
case I40E_ERR_DEVICE_NOT_SUPPORTED:
|
||||
return "I40E_ERR_DEVICE_NOT_SUPPORTED";
|
||||
case I40E_ERR_MASTER_REQUESTS_PENDING:
|
||||
return "I40E_ERR_MASTER_REQUESTS_PENDING";
|
||||
case I40E_ERR_INVALID_LINK_SETTINGS:
|
||||
return "I40E_ERR_INVALID_LINK_SETTINGS";
|
||||
case I40E_ERR_AUTONEG_NOT_COMPLETE:
|
||||
return "I40E_ERR_AUTONEG_NOT_COMPLETE";
|
||||
case I40E_ERR_RESET_FAILED:
|
||||
return "I40E_ERR_RESET_FAILED";
|
||||
case I40E_ERR_SWFW_SYNC:
|
||||
return "I40E_ERR_SWFW_SYNC";
|
||||
case I40E_ERR_NO_AVAILABLE_VSI:
|
||||
return "I40E_ERR_NO_AVAILABLE_VSI";
|
||||
case I40E_ERR_NO_MEMORY:
|
||||
return "I40E_ERR_NO_MEMORY";
|
||||
case I40E_ERR_BAD_PTR:
|
||||
return "I40E_ERR_BAD_PTR";
|
||||
case I40E_ERR_RING_FULL:
|
||||
return "I40E_ERR_RING_FULL";
|
||||
case I40E_ERR_INVALID_PD_ID:
|
||||
return "I40E_ERR_INVALID_PD_ID";
|
||||
case I40E_ERR_INVALID_QP_ID:
|
||||
return "I40E_ERR_INVALID_QP_ID";
|
||||
case I40E_ERR_INVALID_CQ_ID:
|
||||
return "I40E_ERR_INVALID_CQ_ID";
|
||||
case I40E_ERR_INVALID_CEQ_ID:
|
||||
return "I40E_ERR_INVALID_CEQ_ID";
|
||||
case I40E_ERR_INVALID_AEQ_ID:
|
||||
return "I40E_ERR_INVALID_AEQ_ID";
|
||||
case I40E_ERR_INVALID_SIZE:
|
||||
return "I40E_ERR_INVALID_SIZE";
|
||||
case I40E_ERR_INVALID_ARP_INDEX:
|
||||
return "I40E_ERR_INVALID_ARP_INDEX";
|
||||
case I40E_ERR_INVALID_FPM_FUNC_ID:
|
||||
return "I40E_ERR_INVALID_FPM_FUNC_ID";
|
||||
case I40E_ERR_QP_INVALID_MSG_SIZE:
|
||||
return "I40E_ERR_QP_INVALID_MSG_SIZE";
|
||||
case I40E_ERR_QP_TOOMANY_WRS_POSTED:
|
||||
return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
|
||||
case I40E_ERR_INVALID_FRAG_COUNT:
|
||||
return "I40E_ERR_INVALID_FRAG_COUNT";
|
||||
case I40E_ERR_QUEUE_EMPTY:
|
||||
return "I40E_ERR_QUEUE_EMPTY";
|
||||
case I40E_ERR_INVALID_ALIGNMENT:
|
||||
return "I40E_ERR_INVALID_ALIGNMENT";
|
||||
case I40E_ERR_FLUSHED_QUEUE:
|
||||
return "I40E_ERR_FLUSHED_QUEUE";
|
||||
case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
|
||||
return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
|
||||
case I40E_ERR_INVALID_IMM_DATA_SIZE:
|
||||
return "I40E_ERR_INVALID_IMM_DATA_SIZE";
|
||||
case I40E_ERR_TIMEOUT:
|
||||
return "I40E_ERR_TIMEOUT";
|
||||
case I40E_ERR_OPCODE_MISMATCH:
|
||||
return "I40E_ERR_OPCODE_MISMATCH";
|
||||
case I40E_ERR_CQP_COMPL_ERROR:
|
||||
return "I40E_ERR_CQP_COMPL_ERROR";
|
||||
case I40E_ERR_INVALID_VF_ID:
|
||||
return "I40E_ERR_INVALID_VF_ID";
|
||||
case I40E_ERR_INVALID_HMCFN_ID:
|
||||
return "I40E_ERR_INVALID_HMCFN_ID";
|
||||
case I40E_ERR_BACKING_PAGE_ERROR:
|
||||
return "I40E_ERR_BACKING_PAGE_ERROR";
|
||||
case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
|
||||
return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
|
||||
case I40E_ERR_INVALID_PBLE_INDEX:
|
||||
return "I40E_ERR_INVALID_PBLE_INDEX";
|
||||
case I40E_ERR_INVALID_SD_INDEX:
|
||||
return "I40E_ERR_INVALID_SD_INDEX";
|
||||
case I40E_ERR_INVALID_PAGE_DESC_INDEX:
|
||||
return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
|
||||
case I40E_ERR_INVALID_SD_TYPE:
|
||||
return "I40E_ERR_INVALID_SD_TYPE";
|
||||
case I40E_ERR_MEMCPY_FAILED:
|
||||
return "I40E_ERR_MEMCPY_FAILED";
|
||||
case I40E_ERR_INVALID_HMC_OBJ_INDEX:
|
||||
return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
|
||||
case I40E_ERR_INVALID_HMC_OBJ_COUNT:
|
||||
return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
|
||||
case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
|
||||
return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
|
||||
case I40E_ERR_SRQ_ENABLED:
|
||||
return "I40E_ERR_SRQ_ENABLED";
|
||||
case I40E_ERR_ADMIN_QUEUE_ERROR:
|
||||
return "I40E_ERR_ADMIN_QUEUE_ERROR";
|
||||
case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
|
||||
return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
|
||||
case I40E_ERR_BUF_TOO_SHORT:
|
||||
return "I40E_ERR_BUF_TOO_SHORT";
|
||||
case I40E_ERR_ADMIN_QUEUE_FULL:
|
||||
return "I40E_ERR_ADMIN_QUEUE_FULL";
|
||||
case I40E_ERR_ADMIN_QUEUE_NO_WORK:
|
||||
return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
|
||||
case I40E_ERR_BAD_IWARP_CQE:
|
||||
return "I40E_ERR_BAD_IWARP_CQE";
|
||||
case I40E_ERR_NVM_BLANK_MODE:
|
||||
return "I40E_ERR_NVM_BLANK_MODE";
|
||||
case I40E_ERR_NOT_IMPLEMENTED:
|
||||
return "I40E_ERR_NOT_IMPLEMENTED";
|
||||
case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
|
||||
return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
|
||||
case I40E_ERR_DIAG_TEST_FAILED:
|
||||
return "I40E_ERR_DIAG_TEST_FAILED";
|
||||
case I40E_ERR_NOT_READY:
|
||||
return "I40E_ERR_NOT_READY";
|
||||
case I40E_NOT_SUPPORTED:
|
||||
return "I40E_NOT_SUPPORTED";
|
||||
case I40E_ERR_FIRMWARE_API_VERSION:
|
||||
return "I40E_ERR_FIRMWARE_API_VERSION";
|
||||
}
|
||||
|
||||
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
|
||||
return hw->err_str;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_debug_aq
|
||||
* @hw: debug mask related to admin queue
@@ -127,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -147,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \

@@ -60,6 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);

i40e_status i40e_set_mac_type(struct i40e_hw *hw);

@ -404,7 +404,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
|
|||
* 20-1249MB/s bulk (8000 ints/s)
|
||||
*/
|
||||
bytes_per_int = rc->total_bytes / rc->itr;
|
||||
switch (rc->itr) {
|
||||
switch (new_latency_range) {
|
||||
case I40E_LOWEST_LATENCY:
|
||||
if (bytes_per_int > 10)
|
||||
new_latency_range = I40E_LOW_LATENCY;
|
||||
|
@ -417,9 +417,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
|
|||
break;
|
||||
case I40E_BULK_LATENCY:
|
||||
if (bytes_per_int <= 20)
|
||||
rc->latency_range = I40E_LOW_LATENCY;
|
||||
new_latency_range = I40E_LOW_LATENCY;
|
||||
break;
|
||||
default:
|
||||
if (bytes_per_int <= 20)
|
||||
new_latency_range = I40E_LOW_LATENCY;
|
||||
break;
|
||||
}
|
||||
rc->latency_range = new_latency_range;
|
||||
|
||||
switch (new_latency_range) {
|
||||
case I40E_LOWEST_LATENCY:
|
||||
|
@ -435,42 +440,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
|
|||
break;
|
||||
}
|
||||
|
||||
if (new_itr != rc->itr) {
|
||||
/* do an exponential smoothing */
|
||||
new_itr = (10 * new_itr * rc->itr) /
|
||||
((9 * new_itr) + rc->itr);
|
||||
rc->itr = new_itr & I40E_MAX_ITR;
|
||||
}
|
||||
if (new_itr != rc->itr)
|
||||
rc->itr = new_itr;
|
||||
|
||||
rc->total_bytes = 0;
|
||||
rc->total_packets = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_update_dynamic_itr - Adjust ITR based on bytes per int
|
||||
* @q_vector: the vector to adjust
|
||||
**/
|
||||
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
|
||||
{
|
||||
u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
|
||||
struct i40e_hw *hw = &q_vector->vsi->back->hw;
|
||||
u32 reg_addr;
|
||||
u16 old_itr;
|
||||
|
||||
reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
|
||||
old_itr = q_vector->rx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->rx);
|
||||
if (old_itr != q_vector->rx.itr)
|
||||
wr32(hw, reg_addr, q_vector->rx.itr);
|
||||
|
||||
reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
|
||||
old_itr = q_vector->tx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->tx);
|
||||
if (old_itr != q_vector->tx.itr)
|
||||
wr32(hw, reg_addr, q_vector->tx.itr);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* i40evf_setup_tx_descriptors - Allocate the Tx descriptors
|
||||
* @tx_ring: the tx ring to set up
|
||||
*
|
||||
|
@ -873,7 +850,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
|
|||
return;
|
||||
|
||||
/* did the hardware decode the packet and checksum? */
|
||||
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
|
||||
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
|
||||
return;
|
||||
|
||||
/* both known and outer_ip must be set for the below code to work */
|
||||
|
@ -888,25 +865,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
|
|||
ipv6 = true;
|
||||
|
||||
if (ipv4 &&
|
||||
(rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
|
||||
(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
|
||||
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
|
||||
BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
|
||||
goto checksum_fail;
|
||||
|
||||
/* likely incorrect csum if alternate IP extension headers found */
|
||||
if (ipv6 &&
|
||||
rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
|
||||
rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
|
||||
/* don't increment checksum err here, non-fatal err */
|
||||
return;
|
||||
|
||||
/* there was some L4 error, count error and punt packet to the stack */
|
||||
if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
|
||||
if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
|
||||
goto checksum_fail;
|
||||
|
||||
/* handle packets that were not able to be checksummed due
|
||||
* to arrival speed, in this case the stack can compute
|
||||
* the csum.
|
||||
*/
|
||||
if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
|
||||
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
|
||||
return;
|
||||
|
||||
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
|
||||
|
@ -1027,7 +1004,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
|
||||
I40E_RXD_QW1_STATUS_SHIFT;
|
||||
|
||||
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
break;
|
||||
|
||||
/* This memory barrier is needed to keep us from reading
|
||||
|
@ -1063,8 +1040,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
|
||||
I40E_RXD_QW1_ERROR_SHIFT;
|
||||
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
|
||||
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
|
||||
I40E_RXD_QW1_PTYPE_SHIFT;
|
||||
|
@ -1116,7 +1093,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
I40E_RX_INCREMENT(rx_ring, i);
|
||||
|
||||
if (unlikely(
|
||||
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
!(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
struct i40e_rx_buffer *next_buffer;
|
||||
|
||||
next_buffer = &rx_ring->rx_bi[i];
|
||||
|
@ -1126,7 +1103,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
}
|
||||
|
||||
/* ERR_MASK will only have valid bits if EOP set */
|
||||
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
dev_kfree_skb_any(skb);
|
||||
continue;
|
||||
}
|
||||
|
@ -1141,7 +1118,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
|
||||
|
||||
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
|
||||
: 0;
|
||||
#ifdef I40E_FCOE
|
||||
|
@ -1202,7 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
|
||||
I40E_RXD_QW1_STATUS_SHIFT;
|
||||
|
||||
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
|
||||
break;
|
||||
|
||||
/* This memory barrier is needed to keep us from reading
|
||||
|
@ -1220,7 +1197,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
|
||||
I40E_RXD_QW1_ERROR_SHIFT;
|
||||
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
|
||||
|
||||
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
|
||||
I40E_RXD_QW1_PTYPE_SHIFT;
|
||||
|
@ -1238,13 +1215,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
I40E_RX_INCREMENT(rx_ring, i);
|
||||
|
||||
if (unlikely(
|
||||
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
!(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
|
||||
rx_ring->rx_stats.non_eop_descs++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* ERR_MASK will only have valid bits if EOP set */
|
||||
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
|
||||
dev_kfree_skb_any(skb);
|
||||
/* TODO: shouldn't we increment a counter indicating the
|
||||
* drop?
|
||||
|
@ -1262,7 +1239,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
|
||||
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
|
||||
|
||||
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
|
||||
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
|
||||
: 0;
|
||||
i40e_receive_skb(rx_ring, skb, vlan_tag);
|
||||
|
@ -1280,6 +1257,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
|
|||
return total_rx_packets;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
|
||||
* @vsi: the VSI we care about
|
||||
* @q_vector: q_vector for which itr is being updated and interrupt enabled
|
||||
*
|
||||
**/
|
||||
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
|
||||
struct i40e_q_vector *q_vector)
|
||||
{
|
||||
struct i40e_hw *hw = &vsi->back->hw;
|
||||
u16 old_itr;
|
||||
int vector;
|
||||
u32 val;
|
||||
|
||||
vector = (q_vector->v_idx + vsi->base_vector);
|
||||
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
|
||||
old_itr = q_vector->rx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->rx);
|
||||
if (old_itr != q_vector->rx.itr) {
|
||||
val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_RX_ITR <<
|
||||
I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
|
||||
(q_vector->rx.itr <<
|
||||
I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
|
||||
} else {
|
||||
val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_ITR_NONE <<
|
||||
I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
|
||||
}
|
||||
if (!test_bit(__I40E_DOWN, &vsi->state))
|
||||
wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
|
||||
} else {
|
||||
i40evf_irq_enable_queues(vsi->back, 1
|
||||
<< q_vector->v_idx);
|
||||
}
|
||||
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
|
||||
old_itr = q_vector->tx.itr;
|
||||
i40e_set_new_dynamic_itr(&q_vector->tx);
|
||||
if (old_itr != q_vector->tx.itr) {
|
||||
val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_TX_ITR <<
|
||||
I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
|
||||
(q_vector->tx.itr <<
|
||||
I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
|
||||
|
||||
} else {
|
||||
val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
|
||||
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
|
||||
(I40E_ITR_NONE <<
|
||||
I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
|
||||
}
|
||||
if (!test_bit(__I40E_DOWN, &vsi->state))
|
||||
wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
|
||||
} else {
|
||||
i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
|
||||
* @napi: napi struct with our devices info in it
|
||||
|
@ -1336,13 +1374,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
|
|||
|
||||
/* Work is done so exit the polling mode and re-enable the interrupt */
|
||||
napi_complete(napi);
|
||||
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
|
||||
ITR_IS_DYNAMIC(vsi->tx_itr_setting))
|
||||
i40e_update_dynamic_itr(q_vector);
|
||||
|
||||
if (!test_bit(__I40E_DOWN, &vsi->state))
|
||||
i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
|
||||
|
||||
i40e_update_enable_itr(vsi, q_vector);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
|
|||
|
||||
/* Supported RSS offloads */
|
||||
#define I40E_DEFAULT_RSS_HENA ( \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
|
||||
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
|
||||
|
||||
/* Supported Rx Buffer Sizes */
|
||||
#define I40E_RXBUFFER_512 512 /* Used for packet split */
|
||||
|
@ -129,16 +129,16 @@ enum i40e_dyn_idx_t {
|
|||
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
|
||||
#define I40E_MIN_DESC_PENDING 4
|
||||
|
||||
#define I40E_TX_FLAGS_CSUM (u32)(1)
|
||||
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
|
||||
#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
|
||||
#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
|
||||
#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
|
||||
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
|
||||
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
|
||||
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
|
||||
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
|
||||
#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
|
||||
#define I40E_TX_FLAGS_CSUM BIT(0)
|
||||
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
|
||||
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
|
||||
#define I40E_TX_FLAGS_TSO BIT(3)
|
||||
#define I40E_TX_FLAGS_IPV4 BIT(4)
|
||||
#define I40E_TX_FLAGS_IPV6 BIT(5)
|
||||
#define I40E_TX_FLAGS_FCCRC BIT(6)
|
||||
#define I40E_TX_FLAGS_FSO BIT(7)
|
||||
#define I40E_TX_FLAGS_FD_SB BIT(9)
|
||||
#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
|
||||
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
|
||||
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
|
||||
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
|
||||
|
|
|
@@ -491,6 +491,7 @@ struct i40e_hw {

/* debug mask */
u32 debug_mask;
char err_str[16];
};

static inline bool i40e_is_vf(struct i40e_hw *hw)
@ -604,7 +605,7 @@ enum i40e_rx_desc_status_bits {
|
|||
};
|
||||
|
||||
#define I40E_RXD_QW1_STATUS_SHIFT 0
|
||||
#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
|
||||
#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
|
||||
<< I40E_RXD_QW1_STATUS_SHIFT)
|
||||
|
||||
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
|
||||
|
@ -612,8 +613,8 @@ enum i40e_rx_desc_status_bits {
|
|||
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
|
||||
|
||||
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
|
||||
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
|
||||
I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
|
||||
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
|
||||
BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
|
||||
|
||||
enum i40e_rx_desc_fltstat_values {
|
||||
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
|
||||
|
@ -747,8 +748,7 @@ enum i40e_rx_ptype_payload_layer {
|
|||
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
|
||||
|
||||
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
|
||||
#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
|
||||
I40E_RXD_QW1_LENGTH_SPH_SHIFT)
|
||||
#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
|
||||
|
||||
enum i40e_rx_desc_ext_status_bits {
|
||||
/* Note: These are predefined bit offsets */
|
||||
|
@ -924,12 +924,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
|
|||
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
|
||||
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
|
||||
#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
|
||||
|
||||
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
|
||||
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
|
||||
I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
|
||||
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
|
||||
BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
|
||||
|
||||
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
|
||||
|
||||
|
@ -994,8 +994,8 @@ enum i40e_filter_program_desc_fd_status {
|
|||
};
|
||||
|
||||
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
|
||||
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
|
||||
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
|
||||
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
|
||||
BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
|
||||
|
||||
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
|
||||
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
|
||||
|
@ -1013,8 +1013,7 @@ enum i40e_filter_program_desc_pcmd {
|
|||
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
|
||||
|
||||
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
|
||||
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
|
||||
I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
|
||||
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
|
||||
|
||||
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
|
||||
I40E_TXD_FLTR_QW1_CMD_SHIFT)
|
||||
|
|
|
@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
|
|||
* error regardless of version mismatch.
|
||||
*/
|
||||
#define I40E_VIRTCHNL_VERSION_MAJOR 1
|
||||
#define I40E_VIRTCHNL_VERSION_MINOR 0
|
||||
#define I40E_VIRTCHNL_VERSION_MINOR 1
|
||||
#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
|
||||
|
||||
struct i40e_virtchnl_version_info {
|
||||
u32 major;
|
||||
u32 minor;
|
||||
|
@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
|
|||
*/
|
||||
|
||||
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
|
||||
* VF sends this request to PF with no parameters
|
||||
* Version 1.0 VF sends this request to PF with no parameters
|
||||
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
|
||||
* PF responds with an indirect message containing
|
||||
* i40e_virtchnl_vf_resource and one or more
|
||||
* i40e_virtchnl_vsi_resource structures.
|
||||
|
@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
|
|||
u8 default_mac_addr[ETH_ALEN];
|
||||
};
|
||||
/* VF offload flags */
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
|
||||
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
|
||||
|
||||
struct i40e_virtchnl_vf_resource {
|
||||
u16 num_vsis;
|
||||
|
|
|
@ -207,17 +207,17 @@ struct i40evf_adapter {
|
|||
struct msix_entry *msix_entries;
|
||||
|
||||
u32 flags;
|
||||
#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
|
||||
#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
|
||||
#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
|
||||
#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
|
||||
#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
|
||||
#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
|
||||
#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
|
||||
#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
|
||||
#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
|
||||
#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
|
||||
#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
|
||||
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
|
||||
#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
|
||||
#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
|
||||
#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
|
||||
#define I40EVF_FLAG_IN_NETPOLL BIT(4)
|
||||
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
|
||||
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
|
||||
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
|
||||
#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
|
||||
#define I40EVF_FLAG_RESET_PENDING BIT(9)
|
||||
#define I40EVF_FLAG_RESET_NEEDED BIT(10)
|
||||
/* duplcates for common code */
|
||||
#define I40E_FLAG_FDIR_ATR_ENABLED 0
|
||||
#define I40E_FLAG_DCB_ENABLED 0
|
||||
|
@ -225,15 +225,16 @@ struct i40evf_adapter {
|
|||
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
|
||||
/* flags for admin queue service task */
|
||||
u32 aq_required;
|
||||
#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
|
||||
#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
|
||||
#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
|
||||
#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
|
||||
#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
|
||||
#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
|
||||
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
|
||||
#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
|
||||
#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
|
||||
#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
|
||||
#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
|
||||
#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
|
||||
#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
|
||||
#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
|
||||
#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
|
||||
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
|
||||
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
|
||||
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
|
||||
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
|
||||
|
||||
/* OS defined structs */
|
||||
struct net_device *netdev;
|
||||
|
@ -249,8 +250,17 @@ struct i40evf_adapter {
|
|||
bool netdev_registered;
|
||||
bool link_up;
|
||||
enum i40e_virtchnl_ops current_op;
|
||||
#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
|
||||
I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
|
||||
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
|
||||
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
|
||||
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
|
||||
I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
|
||||
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
|
||||
struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
|
||||
struct i40e_virtchnl_version_info pf_version;
|
||||
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
|
||||
((_a)->pf_version.minor == 1))
|
||||
u16 msg_enable;
|
||||
struct i40e_eth_stats current_stats;
|
||||
struct i40e_vsi vsi;
|
||||
|
@ -264,6 +274,7 @@ extern const char i40evf_driver_version[];
|
|||
|
||||
int i40evf_up(struct i40evf_adapter *adapter);
|
||||
void i40evf_down(struct i40evf_adapter *adapter);
|
||||
int i40evf_process_config(struct i40evf_adapter *adapter);
|
||||
void i40evf_reset(struct i40evf_adapter *adapter);
|
||||
void i40evf_set_ethtool_ops(struct net_device *netdev);
|
||||
void i40evf_update_stats(struct i40evf_adapter *adapter);
|
||||
|
|
|
@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
|
|||
|
||||
switch (cmd->flow_type) {
|
||||
case TCP_V4_FLOW:
|
||||
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
|
||||
if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
|
||||
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
||||
break;
|
||||
case UDP_V4_FLOW:
|
||||
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
|
||||
if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
|
||||
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
||||
break;
|
||||
|
||||
|
@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
|
|||
break;
|
||||
|
||||
case TCP_V6_FLOW:
|
||||
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
|
||||
if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
|
||||
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
||||
break;
|
||||
case UDP_V6_FLOW:
|
||||
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
|
||||
if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
|
||||
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
||||
break;
|
||||
|
||||
|
@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
|
|||
case TCP_V4_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
|
|||
case TCP_V6_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
|
|||
case UDP_V4_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
|
|||
case UDP_V6_FLOW:
|
||||
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
|
||||
case 0:
|
||||
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
break;
|
||||
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
|
||||
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
|
|||
if ((nfc->data & RXH_L4_B_0_1) ||
|
||||
(nfc->data & RXH_L4_B_2_3))
|
||||
return -EINVAL;
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
|
||||
break;
|
||||
case AH_ESP_V6_FLOW:
|
||||
case AH_V6_FLOW:
|
||||
|
@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
|
|||
if ((nfc->data & RXH_L4_B_0_1) ||
|
||||
(nfc->data & RXH_L4_B_2_3))
|
||||
return -EINVAL;
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
|
||||
break;
|
||||
case IPV4_FLOW:
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
|
||||
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
|
||||
break;
|
||||
case IPV6_FLOW:
|
||||
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
|
||||
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
|
||||
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
|
|
@ -240,7 +240,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
|
|||
int i;
|
||||
|
||||
for (i = 1; i < adapter->num_msix_vectors; i++) {
|
||||
if (mask & (1 << (i - 1))) {
|
||||
if (mask & BIT(i - 1)) {
|
||||
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
|
||||
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
|
||||
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
|
||||
|
@ -268,7 +268,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
|
|||
wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
|
||||
}
|
||||
for (i = 1; i < adapter->num_msix_vectors; i++) {
|
||||
if (mask & (1 << i)) {
|
||||
if (mask & BIT(i)) {
|
||||
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
|
||||
dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
|
||||
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
|
||||
|
@ -377,7 +377,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
|
|||
q_vector->tx.count++;
|
||||
q_vector->tx.latency_range = I40E_LOW_LATENCY;
|
||||
q_vector->num_ringpairs++;
|
||||
q_vector->ring_mask |= (1 << t_idx);
|
||||
q_vector->ring_mask |= BIT(t_idx);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1371,6 +1371,10 @@ static void i40evf_watchdog_task(struct work_struct *work)
|
|||
}
|
||||
goto watchdog_done;
|
||||
}
|
||||
if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
|
||||
i40evf_send_vf_config_msg(adapter);
|
||||
goto watchdog_done;
|
||||
}
|
||||
|
||||
if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
|
||||
i40evf_disable_queues(adapter);
|
||||
|
@ -1606,7 +1610,8 @@ continue_reset:
|
|||
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
|
||||
err);
|
||||
|
||||
i40evf_map_queues(adapter);
|
||||
adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
|
||||
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
|
||||
|
||||
/* re-add all MAC filters */
|
||||
list_for_each_entry(f, &adapter->mac_filter_list, list) {
|
||||
|
@ -1616,7 +1621,7 @@ continue_reset:
|
|||
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
|
||||
f->add = true;
|
||||
}
|
||||
adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
|
||||
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
|
||||
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
|
||||
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
|
||||
i40evf_misc_irq_enable(adapter);
|
||||
|
@ -1981,6 +1986,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
|
|||
return -EBUSY;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_process_config - Process the config information we got from the PF
|
||||
* @adapter: board private structure
|
||||
*
|
||||
* Verify that we have a valid config struct, and set up our netdev features
|
||||
* and our VSI struct.
|
||||
**/
|
||||
int i40evf_process_config(struct i40evf_adapter *adapter)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
int i;
|
||||
|
||||
/* got VF config message back from PF, now we can parse it */
|
||||
for (i = 0; i < adapter->vf_res->num_vsis; i++) {
|
||||
if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
|
||||
adapter->vsi_res = &adapter->vf_res->vsi_res[i];
|
||||
}
|
||||
if (!adapter->vsi_res) {
|
||||
dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (adapter->vf_res->vf_offload_flags
|
||||
& I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
|
||||
netdev->vlan_features = netdev->features;
|
||||
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
netdev->features |= NETIF_F_HIGHDMA |
|
||||
NETIF_F_SG |
|
||||
NETIF_F_IP_CSUM |
|
||||
NETIF_F_SCTP_CSUM |
|
||||
NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_TSO6 |
|
||||
NETIF_F_RXCSUM |
|
||||
NETIF_F_GRO;
|
||||
|
||||
/* copy netdev features into list of user selectable features */
|
||||
netdev->hw_features |= netdev->features;
|
||||
netdev->hw_features &= ~NETIF_F_RXCSUM;
|
||||
|
||||
adapter->vsi.id = adapter->vsi_res->vsi_id;
|
||||
|
||||
adapter->vsi.back = adapter;
|
||||
adapter->vsi.base_vector = 1;
|
||||
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
|
||||
adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
|
||||
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
|
||||
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
|
||||
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
|
||||
adapter->vsi.netdev = adapter->netdev;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_init_task - worker thread to perform delayed initialization
|
||||
* @work: pointer to work_struct containing our data
|
||||
|
@ -2001,7 +2062,7 @@ static void i40evf_init_task(struct work_struct *work)
|
|||
struct net_device *netdev = adapter->netdev;
|
||||
struct i40e_hw *hw = &adapter->hw;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
int i, err, bufsz;
|
||||
int err, bufsz;
|
||||
|
||||
switch (adapter->state) {
|
||||
case __I40EVF_STARTUP:
|
||||
|
@ -2052,6 +2113,12 @@ static void i40evf_init_task(struct work_struct *work)
|
|||
if (err) {
|
||||
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
|
||||
err = i40evf_send_api_ver(adapter);
|
||||
else
|
||||
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
|
||||
adapter->pf_version.major,
|
||||
adapter->pf_version.minor,
|
||||
I40E_VIRTCHNL_VERSION_MAJOR,
|
||||
I40E_VIRTCHNL_VERSION_MINOR);
|
||||
goto err;
|
||||
}
|
||||
err = i40evf_send_vf_config_msg(adapter);
|
||||
|
@ -2087,42 +2154,15 @@ static void i40evf_init_task(struct work_struct *work)
|
|||
default:
|
||||
goto err_alloc;
|
||||
}
|
||||
/* got VF config message back from PF, now we can parse it */
|
||||
for (i = 0; i < adapter->vf_res->num_vsis; i++) {
|
||||
if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
|
||||
adapter->vsi_res = &adapter->vf_res->vsi_res[i];
|
||||
}
|
||||
if (!adapter->vsi_res) {
|
||||
dev_err(&pdev->dev, "No LAN VSI found\n");
|
||||
if (i40evf_process_config(adapter))
|
||||
goto err_alloc;
|
||||
}
|
||||
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
|
||||
|
||||
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
|
||||
|
||||
netdev->netdev_ops = &i40evf_netdev_ops;
|
||||
i40evf_set_ethtool_ops(netdev);
|
||||
netdev->watchdog_timeo = 5 * HZ;
|
||||
netdev->features |= NETIF_F_HIGHDMA |
|
||||
NETIF_F_SG |
|
||||
NETIF_F_IP_CSUM |
|
||||
NETIF_F_SCTP_CSUM |
|
||||
NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_TSO6 |
|
||||
NETIF_F_RXCSUM |
|
||||
NETIF_F_GRO;
|
||||
|
||||
if (adapter->vf_res->vf_offload_flags
|
||||
& I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
|
||||
netdev->vlan_features = netdev->features;
|
||||
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
|
||||
/* copy netdev features into list of user selectable features */
|
||||
netdev->hw_features |= netdev->features;
|
||||
netdev->hw_features &= ~NETIF_F_RXCSUM;
|
||||
|
||||
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
|
||||
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
|
||||
|
@ -2153,17 +2193,6 @@ static void i40evf_init_task(struct work_struct *work)
|
|||
|
||||
netif_carrier_off(netdev);
|
||||
|
||||
adapter->vsi.id = adapter->vsi_res->vsi_id;
|
||||
adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
|
||||
adapter->vsi.back = adapter;
|
||||
adapter->vsi.base_vector = 1;
|
||||
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
|
||||
adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
|
||||
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
|
||||
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
|
||||
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
|
||||
adapter->vsi.netdev = adapter->netdev;
|
||||
|
||||
if (!adapter->netdev_registered) {
|
||||
err = register_netdev(netdev);
|
||||
if (err)
|
||||
|
@ -2291,7 +2320,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
hw = &adapter->hw;
|
||||
hw->back = adapter;
|
||||
|
||||
adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
|
||||
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
|
||||
adapter->state = __I40EVF_STARTUP;
|
||||
|
||||
/* Call save state here because it relies on the adapter struct. */
|
||||
|
|
|
@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
|
|||
|
||||
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
|
||||
if (err)
|
||||
dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
|
||||
op, err, hw->aq.asq_last_status);
|
||||
dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
|
||||
op, i40evf_stat_str(hw, err),
|
||||
i40evf_aq_str(hw, hw->aq.asq_last_status));
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
|
|||
}
|
||||
|
||||
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
|
||||
if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
|
||||
(pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
|
||||
adapter->pf_version = *pf_vvi;
|
||||
|
||||
if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
|
||||
((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
|
||||
(pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
|
||||
err = -EIO;
|
||||
|
||||
out_alloc:
|
||||
|
@ -145,8 +149,24 @@ out:
|
|||
**/
|
||||
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
|
||||
{
|
||||
return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
|
||||
NULL, 0);
|
||||
u32 caps;
|
||||
|
||||
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
|
||||
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
|
||||
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
|
||||
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
|
||||
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
|
||||
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
|
||||
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
|
||||
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
|
||||
if (PF_IS_V11(adapter))
|
||||
return i40evf_send_pf_msg(adapter,
|
||||
I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
|
||||
(u8 *)&caps, sizeof(caps));
|
||||
else
|
||||
return i40evf_send_pf_msg(adapter,
|
||||
I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
|
||||
NULL, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
|
|||
}
|
||||
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
|
||||
vqs.vsi_id = adapter->vsi_res->vsi_id;
|
||||
vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
|
||||
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
|
||||
vqs.rx_queues = vqs.tx_queues;
|
||||
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
|
||||
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
|
||||
|
@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
|
|||
}
|
||||
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
|
||||
vqs.vsi_id = adapter->vsi_res->vsi_id;
|
||||
vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
|
||||
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
|
||||
vqs.rx_queues = vqs.tx_queues;
|
||||
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
|
||||
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
|
||||
|
@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
|
|||
return;
|
||||
}
|
||||
if (v_retval) {
|
||||
dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
|
||||
__func__, v_retval, v_opcode);
|
||||
dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
|
||||
__func__, v_retval,
|
||||
i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
|
||||
}
|
||||
switch (v_opcode) {
|
||||
case I40E_VIRTCHNL_OP_GET_STATS: {
|
||||
|
@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
|
|||
adapter->current_stats = *stats;
|
||||
}
|
||||
break;
|
||||
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
|
||||
u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
|
||||
I40E_MAX_VF_VSI *
|
||||
sizeof(struct i40e_virtchnl_vsi_resource);
|
||||
memcpy(adapter->vf_res, msg, min(msglen, len));
|
||||
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
|
||||
i40evf_process_config(adapter);
|
||||
}
|
||||
break;
|
||||
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
|
||||
/* enable transmits */
|
||||
i40evf_irq_enable(adapter, true);
|
||||
|
@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
|
|||
i40evf_free_all_rx_resources(adapter);
|
||||
break;
|
||||
case I40E_VIRTCHNL_OP_VERSION:
|
||||
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
|
||||
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
|
||||
/* Don't display an error if we get these out of sequence.
|
||||
* If the firmware needed to get kicked, we'll get these and
@@ -1,5 +1,5 @@
/* Intel(R) Gigabit Ethernet Linux driver
* Copyright(c) 2007-2014 Intel Corporation.
* Copyright(c) 2007-2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
(sizeof(e1000_m88_cable_length_table) / \
sizeof(e1000_m88_cable_length_table[0]))

static const u16 e1000_igp_2_cable_length_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
104, 109, 114, 118, 121, 124};
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
(sizeof(e1000_igp_2_cable_length_table) / \
sizeof(e1000_igp_2_cable_length_table[0]))

/**
* igb_check_reset_block - Check if PHY reset is blocked
@@ -1700,7 +1694,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)

index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1796,7 +1790,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)

index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1840,7 +1834,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1857,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
IGP02E1000_AGC_LENGTH_MASK;

/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
(cur_agc_index == 0)) {
ret_val = -E1000_ERR_PHY;
goto out;

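The igb hunks above drop the hand-rolled *_CABLE_LENGTH_TABLE_SIZE macros in favour of ARRAY_SIZE(), which computes the element count directly from the array definition. A self-contained sketch, with ARRAY_SIZE() defined locally and a shortened stand-in for the M88 cable-length table:

#include <stdio.h>

/* Local equivalent of the kernel's ARRAY_SIZE(). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Shortened stand-in for e1000_m88_cable_length_table[]. */
static const unsigned short cable_length_table[] = { 0, 50, 80, 110, 140, 140, 0xff };

int main(void)
{
	unsigned int index = 6;

	/* Same bound check as the driver, minus the hand-rolled size macro. */
	if (index >= ARRAY_SIZE(cable_length_table) - 1)
		printf("index %u out of range (table has %zu entries)\n",
		       index, ARRAY_SIZE(cable_length_table));
	else
		printf("cable length %u m\n", (unsigned)cable_length_table[index]);
	return 0;
}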
@@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;

if (ec->rx_max_coalesced_frames ||
ec->rx_coalesce_usecs_irq ||
ec->rx_max_coalesced_frames_irq ||
ec->tx_max_coalesced_frames ||
ec->tx_coalesce_usecs_irq ||
ec->stats_block_coalesce_usecs ||
ec->use_adaptive_rx_coalesce ||
ec->use_adaptive_tx_coalesce ||
ec->pkt_rate_low ||
ec->rx_coalesce_usecs_low ||
ec->rx_max_coalesced_frames_low ||
ec->tx_coalesce_usecs_low ||
ec->tx_max_coalesced_frames_low ||
ec->pkt_rate_high ||
ec->rx_coalesce_usecs_high ||
ec->rx_max_coalesced_frames_high ||
ec->tx_coalesce_usecs_high ||
ec->tx_max_coalesced_frames_high ||
ec->rate_sample_interval)
return -ENOTSUPP;

if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||

@@ -57,8 +57,8 @@
#include "igb.h"

#define MAJ 5
#define MIN 2
#define BUILD 18
#define MIN 3
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";

@@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);

/* We support this operation only for 82599 and x540 at the moment */
if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
return IXGBEVF_82599_RETA_SIZE;
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
return IXGBEVF_X550_VFRETA_SIZE;

return 0;
return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);

/* We support this operation only for 82599 and x540 at the moment */
if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
return IXGBEVF_RSS_HASH_KEY_SIZE;

return 0;
return IXGBEVF_RSS_HASH_KEY_SIZE;
}

static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;

/* If neither indirection table nor hash key was requested - just
* return a success avoiding taking any locks.
*/
if (!indir && !key)
return 0;
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
if (key)
memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));

spin_lock_bh(&adapter->mbx_lock);
if (indir)
err = ixgbevf_get_reta_locked(&adapter->hw, indir,
adapter->num_rx_queues);
if (indir) {
int i;

if (!err && key)
err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
}
} else {
/* If neither indirection table nor hash key was requested
* - just return a success avoiding taking any locks.
*/
if (!indir && !key)
return 0;

spin_unlock_bh(&adapter->mbx_lock);
spin_lock_bh(&adapter->mbx_lock);
if (indir)
err = ixgbevf_get_reta_locked(&adapter->hw, indir,
adapter->num_rx_queues);

if (!err && key)
err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

spin_unlock_bh(&adapter->mbx_lock);
}

return err;
}

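The reworked ixgbevf_get_rxfh() above splits into two paths: X550-class VFs answer ethtool from the RSS key and indirection table the driver itself programmed and cached in the adapter structure, while older VFs still query the PF over the mailbox under mbx_lock. A rough user-space sketch of that split, with placeholder sizes and a dummy mailbox query standing in for the real ixgbevf_get_reta_locked()/ixgbevf_get_rss_key_locked() calls:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RETA_SIZE 64	/* placeholder for IXGBEVF_X550_VFRETA_SIZE */
#define KEY_SIZE  40	/* placeholder for IXGBEVF_RSS_HASH_KEY_SIZE */

struct vf_adapter {
	int     is_x550;			/* mac.type >= ixgbe_mac_X550_vf */
	uint8_t rss_indir_tbl[RETA_SIZE];	/* cached copy, written at setup time */
	uint8_t rss_key[KEY_SIZE];
};

/* Dummy mailbox query used by the pre-X550 path in this sketch. */
static int query_pf_for_reta(uint32_t *indir, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		indir[i] = i & 1;
	return 0;
}

static int get_rxfh(struct vf_adapter *a, uint32_t *indir, uint8_t *key)
{
	if (a->is_x550) {
		/* X550 VF: serve from the cached copy, no mailbox round trip. */
		if (key)
			memcpy(key, a->rss_key, sizeof(a->rss_key));
		if (indir)
			for (unsigned int i = 0; i < RETA_SIZE; i++)
				indir[i] = a->rss_indir_tbl[i];
		return 0;
	}
	/* Older VFs: ask the PF over the mailbox (locking omitted here). */
	return indir ? query_pf_for_reta(indir, RETA_SIZE) : 0;
}

int main(void)
{
	struct vf_adapter a = { .is_x550 = 1 };
	uint32_t indir[RETA_SIZE];
	uint8_t key[KEY_SIZE];

	a.rss_indir_tbl[3] = 1;
	if (!get_rxfh(&a, indir, key))
		printf("reta[3] = %u\n", (unsigned)indir[3]);
	return 0;
}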
@@ -144,9 +144,11 @@ struct ixgbevf_ring {

#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES 2
#define IXGBEVF_82599_RETA_SIZE 128
#define IXGBEVF_MAX_RSS_QUEUES 2
#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */
#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */
#define IXGBEVF_RSS_HASH_KEY_SIZE 40
#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */

#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -447,6 +449,9 @@ struct ixgbevf_adapter {

spinlock_t mbx_lock;
unsigned long last_reset;

u32 rss_key[IXGBEVF_VFRSSRK_REGS];
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};

enum ixbgevf_state_t {

@@ -1696,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vfmrqc = 0, vfreta = 0;
u32 rss_key[10];
u16 rss_i = adapter->num_rx_queues;
int i, j;
u8 i, j;

/* Fill out hash function seeds */
netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);

/* Fill out redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
if (j == rss_i)
j = 0;
vfreta = (vfreta << 8) | (j * 0x1);
if ((i & 3) == 3)

adapter->rss_indir_tbl[i] = j;

vfreta |= j << (i & 0x3) * 8;
if ((i & 3) == 3) {
IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
vfreta = 0;
}
}

/* Perform hash on these packet types */
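A final note on the redirection-table loop above: each 32-bit VFRETA register holds four 8-bit queue indices, so the loop shifts the current index into its byte lane, flushes one register for every fourth entry, and mirrors each value into rss_indir_tbl for the ethtool path shown earlier. A standalone sketch of just that packing, with a placeholder table size and printf standing in for the register write:

#include <stdio.h>
#include <stdint.h>

#define VFRETA_SIZE 64	/* placeholder for IXGBEVF_X550_VFRETA_SIZE */

int main(void)
{
	uint8_t rss_indir_tbl[VFRETA_SIZE];
	uint32_t vfreta = 0;
	unsigned int rss_i = 2;		/* active RSS queues */
	unsigned int i, j;

	for (i = 0, j = 0; i < VFRETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;

		rss_indir_tbl[i] = j;	/* cached for later ethtool -x reads */

		vfreta |= j << (i & 0x3) * 8;	/* place index in its byte lane */
		if ((i & 3) == 3) {
			/* one register per four entries; printf stands in for
			 * the IXGBE_VFRETA(i >> 2) register write
			 */
			printf("VFRETA[%2u] = 0x%08x\n", i >> 2, (unsigned)vfreta);
			vfreta = 0;
		}
	}
	printf("rss_indir_tbl[5] = %u\n", (unsigned)rss_indir_tbl[5]);
	return 0;
}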